"""simple docstring"""
def __a ( __lowerCamelCase, __lowerCamelCase ):
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"""{price_plus_tax(100, 0.25) = }""")
print(f"""{price_plus_tax(125.50, 0.05) = }""")
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
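# Usage sketch (illustrative, made-up data — not part of the original module):
#
#   foods = build_menu(["Burger", "Pizza", "Cola"], [80, 100, 60], [40, 60, 40])
#   taken, total_value = greedy(foods, max_cost=100, key_func=Things.get_value)
#   # picks Pizza then Burger (weights 60 + 40 <= 100): total_value == 180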
from __future__ import annotations


def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: items may be taken partially, sorted by value/weight ratio."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
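# Usage sketch (made-up data): values [1, 3, 5], weights [3, 4, 5], capacity 7
# takes all of the densest item and half of the next: (6.5, [0, 0.5, 1]).
#
#   fractional_knapsack([1, 3, 5], [3, 4, 5], 7)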
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create universe of discourse in Python using linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
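# Note on the [1] indexing above (a clarifying sketch, not part of the original
# file): skfuzzy's fuzzy_or/fuzzy_and return a (universe, membership) pair, so
# index 1 selects the combined membership values.
#
#   x, mf = fuzz.fuzzy_or(X, young, X, middle_aged)
#   assert mf.shape == young.shape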
"""GPTBigCode configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
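# Usage sketch: instantiate the config with defaults and read the aliased
# attributes defined in attribute_map above.
#
#   config = GPTBigCodeConfig()
#   config.hidden_size        # 768, aliased to n_embd
#   config.num_hidden_layers  # 12, aliased to n_layer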
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
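# Sketch of the denoising pattern these tests exercise (not part of the test
# file): walk the timesteps in reverse, predicting a residual and stepping the
# scheduler back toward x_0.
#
#   scheduler = DDPMScheduler(num_train_timesteps=1000)
#   for t in reversed(range(len(scheduler))):
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample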
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class lowercase( __a ):
'''simple docstring'''
lowercase__ = ["image_processor", "tokenizer"]
lowercase__ = "AutoImageProcessor"
lowercase__ = "AutoTokenizer"
def __init__( self: List[str], a_: List[str]=None, a_: Tuple=None, **a_: Tuple ):
'''simple docstring'''
_snake_case : str = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""", a_, )
_snake_case : str = kwargs.pop("""feature_extractor""" )
_snake_case : Union[str, Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(a_, a_ )
_snake_case : Dict = self.image_processor
_snake_case : Any = False
def __call__( self: Any, *a_: Any, **a_: Tuple ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*a_, **a_ )
_snake_case : Dict = kwargs.pop("""images""", a_ )
_snake_case : Optional[Any] = kwargs.pop("""text""", a_ )
if len(a_ ) > 0:
_snake_case : Optional[int] = args[0]
_snake_case : Tuple = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
_snake_case : Tuple = self.image_processor(a_, *a_, **a_ )
if text is not None:
_snake_case : Tuple = self.tokenizer(a_, **a_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
_snake_case : List[str] = encodings["""input_ids"""]
return inputs
def UpperCamelCase_ ( self: Optional[int], *a_: Tuple, **a_: List[str] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a_, **a_ )
def UpperCamelCase_ ( self: int, *a_: List[str], **a_: int ):
'''simple docstring'''
return self.tokenizer.decode(*a_, **a_ )
@contextmanager
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
_snake_case : Any = True
_snake_case : Optional[int] = self.tokenizer
yield
_snake_case : int = self.image_processor
_snake_case : Optional[int] = False
def UpperCamelCase_ ( self: Dict, a_: Optional[Any], a_: str=False, a_: Optional[Any]=None ):
'''simple docstring'''
if added_vocab is None:
_snake_case : Dict = self.tokenizer.get_added_vocab()
_snake_case : str = {}
while tokens:
_snake_case : Union[str, Any] = re.search(r"""<s_(.*?)>""", a_, re.IGNORECASE )
if start_token is None:
break
_snake_case : List[Any] = start_token.group(1 )
_snake_case : str = re.search(rf"</s_{key}>", a_, re.IGNORECASE )
_snake_case : Dict = start_token.group()
if end_token is None:
_snake_case : List[Any] = tokens.replace(a_, """""" )
else:
_snake_case : List[str] = end_token.group()
_snake_case : str = re.escape(a_ )
_snake_case : str = re.escape(a_ )
_snake_case : Union[str, Any] = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", a_, re.IGNORECASE )
if content is not None:
_snake_case : int = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
_snake_case : List[Any] = self.tokenajson(a_, is_inner_value=a_, added_vocab=a_ )
if value:
if len(a_ ) == 1:
_snake_case : List[str] = value[0]
_snake_case : List[str] = value
else: # leaf nodes
_snake_case : Tuple = []
for leaf in content.split(r"""<sep/>""" ):
_snake_case : Tuple = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
_snake_case : int = leaf[1:-2] # for categorical special tokens
output[key].append(a_ )
if len(output[key] ) == 1:
_snake_case : int = output[key][0]
_snake_case : Any = tokens[tokens.find(a_ ) + len(a_ ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:], is_inner_value=a_, added_vocab=a_ )
if len(a_ ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""", a_, )
return self.image_processor_class
@property
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""", a_, )
return self.image_processor
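# Usage sketch of token2json (hypothetical tag names): Donut-style generated
# markup becomes nested JSON.
#
#   processor.token2json("<s_menu><s_name>Latte</s_name></s_menu>")
#   # -> {"menu": {"name": "Latte"}}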
import json
import os
from typing import Optional

import numpy as np

from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer


logger = logging.get_logger(__name__)


class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)

        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not"
                    " exist, no preloaded speaker embeddings will be used - Make sure to provide a correct path to"
                    " the json dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub: bool = False,
        **kwargs,
    ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset: str = None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}`"
                    f" does not exist, no preloaded voice preset will be used - Make sure to provide correct paths"
                    f" to the {voice_preset} embeddings."
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset: Optional[dict] = None, **kwargs):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)

            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"

                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
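# Usage sketch (checkpoint and preset names are real Bark assets, shown only
# as illustration):
#
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
#   # inputs["history_prompt"] carries the semantic/coarse/fine prompt arrays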
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score

import datasets


_CITATION = """\
@inproceedings{kakwani2020indicnlpsuite,
    title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
    author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
    year={2020},
    booktitle={Findings of EMNLP},
}
"""

_DESCRIPTION = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""

_KWARGS_DESCRIPTION = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
    predictions: list of predictions to score (as int64),
        except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
    references: list of ground truth labels corresponding to the predictions (as int64),
        except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
    "accuracy": Accuracy
    "f1": F1 score
    "precision": Precision@10
Examples:

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli')  # 'wnli' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'precision@10': 1.0}

"""


def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class IndicGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                    "references": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if self.config_name != "cvit-mkb-clsr" else None,
        )

    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(references, predictions)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def A_ ( _lowercase ):
'''simple docstring'''
random.seed(_lowercase )
np.random.seed(_lowercase )
torch.manual_seed(_lowercase )
torch.cuda.manual_seed_all(_lowercase )
# ^^ safe to call this function even if cuda is not available
class lowerCamelCase :
'''simple docstring'''
def __init__( self: List[Any] , snake_case: Iterable[torch.nn.Parameter] , snake_case: float = 0.9_9_9_9 , snake_case: float = 0.0 , snake_case: int = 0 , snake_case: bool = False , snake_case: Union[float, int] = 1.0 , snake_case: Union[float, int] = 2 / 3 , snake_case: Optional[Any] = None , snake_case: Dict[str, Any] = None , **snake_case: Union[str, Any] , ) -> str:
if isinstance(snake_case , torch.nn.Module ):
snake_case_ :List[str] = (
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , snake_case , standard_warn=snake_case , )
snake_case_ :Any = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
snake_case_ :Optional[Any] = True
if kwargs.get("""max_value""" , snake_case ) is not None:
snake_case_ :str = """The `max_value` argument is deprecated. Please use `decay` instead."""
deprecate("""max_value""" , """1.0.0""" , snake_case , standard_warn=snake_case )
snake_case_ :Union[str, Any] = kwargs["""max_value"""]
if kwargs.get("""min_value""" , snake_case ) is not None:
snake_case_ :Dict = """The `min_value` argument is deprecated. Please use `min_decay` instead."""
deprecate("""min_value""" , """1.0.0""" , snake_case , standard_warn=snake_case )
snake_case_ :str = kwargs["""min_value"""]
snake_case_ :str = list(snake_case )
snake_case_ :Optional[Any] = [p.clone().detach() for p in parameters]
if kwargs.get("""device""" , snake_case ) is not None:
snake_case_ :Union[str, Any] = """The `device` argument is deprecated. Please use `to` instead."""
deprecate("""device""" , """1.0.0""" , snake_case , standard_warn=snake_case )
self.to(device=kwargs["""device"""] )
snake_case_ :Any = None
snake_case_ :Optional[Any] = decay
snake_case_ :Any = min_decay
snake_case_ :List[Any] = update_after_step
snake_case_ :Optional[Any] = use_ema_warmup
snake_case_ :Optional[Any] = inv_gamma
snake_case_ :int = power
snake_case_ :int = 0
snake_case_ :Dict = None # set in `step()`
snake_case_ :Dict = model_cls
snake_case_ :List[Any] = model_config
@classmethod
def lowerCAmelCase_ ( cls: Optional[int] , snake_case: Tuple , snake_case: Dict ) -> "EMAModel":
snake_case_, snake_case_ :Union[str, Any] = model_cls.load_config(snake_case , return_unused_kwargs=snake_case )
snake_case_ :Tuple = model_cls.from_pretrained(snake_case )
snake_case_ :Any = cls(model.parameters() , model_cls=snake_case , model_config=model.config )
ema_model.load_state_dict(snake_case )
return ema_model
def lowerCAmelCase_ ( self: Optional[Any] , snake_case: List[Any] ) -> str:
if self.model_cls is None:
raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" )
if self.model_config is None:
raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" )
snake_case_ :List[str] = self.model_cls.from_config(self.model_config )
snake_case_ :List[str] = self.state_dict()
state_dict.pop("""shadow_params""" , snake_case )
model.register_to_config(**snake_case )
self.copy_to(model.parameters() )
model.save_pretrained(snake_case )
def lowerCAmelCase_ ( self: Dict , snake_case: int ) -> float:
snake_case_ :List[str] = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
snake_case_ :Any = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
snake_case_ :int = (1 + step) / (10 + step)
snake_case_ :Optional[Any] = min(snake_case , self.decay )
# make sure decay is not smaller than min_decay
snake_case_ :Optional[int] = max(snake_case , self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCAmelCase_ ( self: int , snake_case: Iterable[torch.nn.Parameter] ) -> Optional[int]:
if isinstance(snake_case , torch.nn.Module ):
snake_case_ :Tuple = (
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , snake_case , standard_warn=snake_case , )
snake_case_ :Any = parameters.parameters()
snake_case_ :str = list(snake_case )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
snake_case_ :Union[str, Any] = self.get_decay(self.optimization_step )
snake_case_ :Optional[Any] = decay
snake_case_ :Union[str, Any] = 1 - decay
snake_case_ :Tuple = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , snake_case ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
snake_case_ :List[Any] = deepspeed.zero.GatheredParameters(snake_case , modifier_rank=snake_case )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(snake_case )
def lowerCAmelCase_ ( self: int , snake_case: Iterable[torch.nn.Parameter] ) -> None:
snake_case_ :int = list(snake_case )
for s_param, param in zip(self.shadow_params , snake_case ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCAmelCase_ ( self: str , snake_case: Optional[Any]=None , snake_case: List[str]=None ) -> None:
snake_case_ :List[str] = [
p.to(device=snake_case , dtype=snake_case ) if p.is_floating_point() else p.to(device=snake_case )
for p in self.shadow_params
]
def lowerCAmelCase_ ( self: Union[str, Any] ) -> dict:
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCAmelCase_ ( self: List[Any] , snake_case: Iterable[torch.nn.Parameter] ) -> None:
snake_case_ :Optional[Any] = [param.detach().cpu().clone() for param in parameters]
def lowerCAmelCase_ ( self: int , snake_case: Iterable[torch.nn.Parameter] ) -> None:
if self.temp_stored_params is None:
raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" )
for c_param, param in zip(self.temp_stored_params , snake_case ):
param.data.copy_(c_param.data )
# Better memory-wise.
snake_case_ :Optional[int] = None
def lowerCAmelCase_ ( self: Tuple , snake_case: dict ) -> None:
snake_case_ :Optional[Any] = copy.deepcopy(snake_case )
snake_case_ :Tuple = state_dict.get("""decay""" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("""Decay must be between 0 and 1""" )
snake_case_ :Union[str, Any] = state_dict.get("""min_decay""" , self.min_decay )
if not isinstance(self.min_decay , snake_case ):
raise ValueError("""Invalid min_decay""" )
snake_case_ :List[str] = state_dict.get("""optimization_step""" , self.optimization_step )
if not isinstance(self.optimization_step , snake_case ):
raise ValueError("""Invalid optimization_step""" )
snake_case_ :Union[str, Any] = state_dict.get("""update_after_step""" , self.update_after_step )
if not isinstance(self.update_after_step , snake_case ):
raise ValueError("""Invalid update_after_step""" )
snake_case_ :str = state_dict.get("""use_ema_warmup""" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , snake_case ):
raise ValueError("""Invalid use_ema_warmup""" )
snake_case_ :int = state_dict.get("""inv_gamma""" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("""Invalid inv_gamma""" )
snake_case_ :Union[str, Any] = state_dict.get("""power""" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("""Invalid power""" )
snake_case_ :Optional[int] = state_dict.get("""shadow_params""" , snake_case )
if shadow_params is not None:
snake_case_ :Tuple = shadow_params
if not isinstance(self.shadow_params , snake_case ):
raise ValueError("""shadow_params must be a list""" )
if not all(isinstance(snake_case , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("""shadow_params must all be Tensors""" )
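# Usage sketch of the EMA pattern implemented above: keep shadow weights,
# update them after each optimizer step, and swap them in for evaluation.
#
#   ema = EMAModel(model.parameters(), decay=0.9999)
#   for batch in loader:
#       train_step(model, batch)
#       ema.step(model.parameters())
#   ema.store(model.parameters())    # stash the live weights
#   ema.copy_to(model.parameters())  # evaluate with averaged weights
#   ema.restore(model.parameters())  # put the live weights back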
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
        localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
        localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files.
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
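# Sketch of what the lazy structure above buys: importing the package is cheap,
# and heavy submodules only load on first attribute access.
#
#   from transformers.models import bert
#   bert.BertConfig  # first access triggers the real import of configuration_bert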
import unittest

import numpy as np


def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    """
    Schur complement of the symmetric block matrix [[A, B], [B^T, C]],
    i.e. C - B^T A^{-1} B.
    """
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        # A has two rows while B has three, so the row-count check must raise.
        a = np.array([[1, 2, 1], [2, 1, 2]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
| 338 | 0 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__UpperCamelCase = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__UpperCamelCase = 12_8022
__UpperCamelCase = 12_8028
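# Language-code token ids that M2M100 prepends to each sequence:
# 128022 is the English code and 128028 the French code (compare the
# get_lang_id assertions in the integration tests below).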
@require_sentencepiece
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = MaMaaaTokenizer
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = True
def a_ ( self) -> Tuple:
super().setUp()
snake_case_ = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
snake_case_ = dict(zip(lowerCAmelCase__, range(len(lowerCAmelCase__))))
snake_case_ = Path(self.tmpdirname)
save_json(lowerCAmelCase__, save_dir / VOCAB_FILES_NAMES['vocab_file'])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(lowerCAmelCase__, save_dir / VOCAB_FILES_NAMES['spm_file'])
snake_case_ = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self, **lowerCAmelCase__) -> Union[str, Any]:
return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__) -> Optional[Any]:
return (
"This is a test",
"This is a test",
)
def a_ ( self) -> Optional[int]:
snake_case_ = '</s>'
snake_case_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__), lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__), lowerCAmelCase__)
def a_ ( self) -> Union[str, Any]:
snake_case_ = self.get_tokenizer()
snake_case_ = list(tokenizer.get_vocab().keys())
self.assertEqual(vocab_keys[0], '</s>')
self.assertEqual(vocab_keys[1], '<unk>')
self.assertEqual(vocab_keys[-1], '<s>')
self.assertEqual(len(lowerCAmelCase__), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
@unittest.skip('Skip this test while all models are still to be uploaded.')
def a_ ( self) -> Tuple:
pass
def a_ ( self) -> Tuple:
snake_case_ = self.get_tokenizer()
snake_case_ = tokenizer.tokenize('This is a test')
self.assertListEqual(lowerCAmelCase__, ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__), [2, 3, 4, 5, 6], )
snake_case_ = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
self.assertListEqual(lowerCAmelCase__, ['▁This', '▁is', '▁a', '▁t', 'est'])
snake_case_ = tokenizer.convert_tokens_to_string(lowerCAmelCase__)
self.assertEqual(lowerCAmelCase__, 'This is a test')
@slow
def a_ ( self) -> Tuple:
# fmt: off
snake_case_ = {'input_ids': [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__, model_name='facebook/m2m100_418M', revision='c168bae485c864188cf9aa0e4108b0b6934dc91e', )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = "facebook/m2m100_418M"
SCREAMING_SNAKE_CASE_ = [
"In my opinion, there are two levels of response from the French government.",
"NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
]
SCREAMING_SNAKE_CASE_ = [
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
]
# fmt: off
SCREAMING_SNAKE_CASE_ = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2]
@classmethod
def a_ ( cls) -> Any:
snake_case_ = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name, src_lang='en', tgt_lang='fr')
snake_case_ = 1
return cls
def a_ ( self) -> Tuple:
self.assertEqual(self.tokenizer.get_lang_id('ar'), 12_8006)
self.assertEqual(self.tokenizer.get_lang_id('en'), 12_8022)
self.assertEqual(self.tokenizer.get_lang_id('ro'), 12_8076)
self.assertEqual(self.tokenizer.get_lang_id('mr'), 12_8063)
def a_ ( self) -> List[str]:
snake_case_ = self.tokenizer.get_vocab()
self.assertEqual(len(lowerCAmelCase__), self.tokenizer.vocab_size)
self.assertEqual(vocab['<unk>'], 3)
self.assertIn(self.tokenizer.get_lang_token('en'), lowerCAmelCase__)
def a_ ( self) -> Dict:
snake_case_ = 'en'
snake_case_ = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens, lowerCAmelCase__)
def a_ ( self) -> Any:
self.assertIn(lowerCAmelCase__, self.tokenizer.all_special_ids)
# fmt: off
snake_case_ = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2]
# fmt: on
snake_case_ = self.tokenizer.decode(lowerCAmelCase__, skip_special_tokens=lowerCAmelCase__)
snake_case_ = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCAmelCase__)
self.assertEqual(lowerCAmelCase__, lowerCAmelCase__)
self.assertNotIn(self.tokenizer.eos_token, lowerCAmelCase__)
def a_ ( self) -> Optional[int]:
snake_case_ = tempfile.mkdtemp()
snake_case_ = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(lowerCAmelCase__)
snake_case_ = MaMaaaTokenizer.from_pretrained(lowerCAmelCase__)
self.assertDictEqual(new_tok.lang_token_to_id, lowerCAmelCase__)
@require_torch
def a_ ( self) -> Any:
snake_case_ = 'en'
snake_case_ = 'fr'
snake_case_ = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowerCAmelCase__, return_tensors='pt')
snake_case_ = shift_tokens_right(
batch['labels'], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id)
for k in batch:
snake_case_ = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def a_ ( self) -> Any:
snake_case_ = 'mr'
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('mr')])
self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
snake_case_ = 'zh'
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('zh')])
self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
@require_torch
def a_ ( self) -> Dict:
snake_case_ = 'mr'
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('mr')])
self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
snake_case_ = 'zh'
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('zh')])
self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
@require_torch
def a_ ( self) -> Any:
snake_case_ = self.tokenizer._build_translation_inputs('A test', return_tensors='pt', src_lang='en', tgt_lang='ar')
self.assertEqual(
nested_simplify(lowerCAmelCase__), {
# en_XX, A, test, EOS
'input_ids': [[12_8022, 58, 4183, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 12_8006,
}, )
| 69 | import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
lowercase__ : Any = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> str:
lowerCAmelCase = ['''layers''', '''blocks''']
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
lowercase__ : List[Any] = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
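# Applying WHISPER_MAPPING turns OpenAI parameter names into Hugging Face
# ones, e.g. "decoder.blocks.0.mlp.0.weight" -> "decoder.layers.0.fc1.weight".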
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]:
lowerCAmelCase = list(s_dict.keys() )
for key in keys:
lowerCAmelCase = key
for k, v in WHISPER_MAPPING.items():
if k in key:
lowerCAmelCase = new_key.replace(snake_case__ , snake_case__ )
print(f"{key} -> {new_key}" )
lowerCAmelCase = s_dict.pop(snake_case__ )
return s_dict
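# The next helper builds a bias-free nn.Linear whose weight is the token
# embedding matrix; it is used below to tie the LM head to the decoder
# embeddings when `tie_embeds` is set.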
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]:
lowerCAmelCase , lowerCAmelCase = emb.weight.shape
lowerCAmelCase = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ )
lowerCAmelCase = emb.weight.data
return lin_layer
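# Checkpoint downloader: the expected SHA-256 digest is the second-to-last
# path segment of each URL in _MODELS, and both cached and freshly
# downloaded files are verified against it.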
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> bytes:
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowerCAmelCase = os.path.basename(snake_case__ )
lowerCAmelCase = url.split('''/''' )[-2]
lowerCAmelCase = os.path.join(snake_case__ , snake_case__ )
if os.path.exists(snake_case__ ) and not os.path.isfile(snake_case__ ):
raise RuntimeError(f"{download_target} exists and is not a regular file" )
if os.path.isfile(snake_case__ ):
lowerCAmelCase = open(snake_case__ , '''rb''' ).read()
if hashlib.shaaaa(snake_case__ ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" )
with urllib.request.urlopen(snake_case__ ) as source, open(snake_case__ , '''wb''' ) as output:
with tqdm(
total=int(source.info().get('''Content-Length''' ) ) , ncols=8_0 , unit='''iB''' , unit_scale=snake_case__ , unit_divisor=1_0_2_4 ) as loop:
while True:
lowerCAmelCase = source.read(8_1_9_2 )
if not buffer:
break
output.write(snake_case__ )
loop.update(len(snake_case__ ) )
lowerCAmelCase = open(snake_case__ , '''rb''' ).read()
if hashlib.shaaaa(snake_case__ ).hexdigest() != expected_shaaaa:
raise RuntimeError(
            '''Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.''' )
return model_bytes
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> str:
if ".pt" not in checkpoint_path:
lowerCAmelCase = _download(_MODELS[checkpoint_path] )
else:
lowerCAmelCase = torch.load(snake_case__ , map_location='''cpu''' )
lowerCAmelCase = original_checkpoint['''dims''']
lowerCAmelCase = original_checkpoint['''model_state_dict''']
lowerCAmelCase = state_dict['''decoder.token_embedding.weight''']
remove_ignore_keys_(snake_case__ )
rename_keys(snake_case__ )
lowerCAmelCase = True
lowerCAmelCase = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]
lowerCAmelCase = WhisperConfig(
vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=snake_case__ , decoder_ffn_dim=snake_case__ , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_state'''] , max_source_positions=dimensions['''n_audio_ctx'''] , )
lowerCAmelCase = WhisperForConditionalGeneration(snake_case__ )
lowerCAmelCase , lowerCAmelCase = model.model.load_state_dict(snake_case__ , strict=snake_case__ )
if len(snake_case__ ) > 0 and not set(snake_case__ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
f" but all the following weights are missing {missing}" )
if tie_embeds:
lowerCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
lowerCAmelCase = proj_out_weights
model.save_pretrained(snake_case__ )
if __name__ == "__main__":
lowercase__ : List[str] = argparse.ArgumentParser()
# # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowercase__ : int = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 338 | 0 |
'''Compute "n choose k" combinations using the factorial formula.'''
from math import factorial
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
if n < k or k < 0:
raise ValueError("""Please enter positive integers for n and k where n >= k""" )
return factorial(lowerCAmelCase ) // (factorial(lowerCAmelCase ) * factorial(n - k ))
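# Worked example: C(52, 5) = 2_598_960, the five-card-hand count printed below.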
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
F"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
'''If a class of 40 students must be arranged into groups of''',
F"""4 for group projects, there are {combinations(40, 4)} ways""",
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
F"""are {combinations(10, 3)} ways that first, second and""",
'''third place can be awarded.''',
)
| 70 | from ...processing_utils import ProcessorMixin
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = ["""image_processor""", """feature_extractor"""]
UpperCAmelCase_ : Optional[int] = """TvltImageProcessor"""
UpperCAmelCase_ : Optional[int] = """TvltFeatureExtractor"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Optional[int]:
super().__init__(image_processor=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = image_processor
lowerCAmelCase = feature_extractor
def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) ->List[Any]:
if images is None and audio is None:
raise ValueError('''You need to specify either an `images` or `audio` input to process.''' )
lowerCAmelCase = None
if images is not None:
lowerCAmelCase = self.image_processor(__SCREAMING_SNAKE_CASE , mask_pixel=__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if images_mixed is not None:
lowerCAmelCase = self.image_processor(__SCREAMING_SNAKE_CASE , is_mixed=__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if audio is not None:
lowerCAmelCase = self.feature_extractor(
__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , mask_audio=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowerCAmelCase = {}
if audio is not None:
output_dict.update(__SCREAMING_SNAKE_CASE )
if images is not None:
output_dict.update(__SCREAMING_SNAKE_CASE )
if images_mixed_dict is not None:
output_dict.update(__SCREAMING_SNAKE_CASE )
return output_dict
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
lowerCAmelCase = self.image_processor.model_input_names
lowerCAmelCase = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
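# Minimal usage sketch (names illustrative; assumes instantiated sub-processors):
#   processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
#   inputs = processor(images=video_frames, audio=waveform, sampling_rate=44100)
# Audio goes to the feature extractor, frames to the image processor, and the
# resulting dicts are merged into a single model-input dict.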
| 338 | 0 |
import warnings
from functools import wraps
from typing import Callable
def A ( a_ ) -> Callable:
@wraps(a_ )
def _inner_fn(*a_ ,**a_ ):
warnings.warn(
(F'\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.') ,a_ ,)
return fn(*a_ ,**a_ )
return _inner_fn
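# Usage sketch: decorating a function with `A` wraps it so that every call
# first warns that the API is experimental, then runs the original body:
#
#   @A
#   def new_feature():
#       ...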
| 71 | def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> List[str]:
lowerCAmelCase = len(snake_case__ )
for i in range(length - 1 ):
lowerCAmelCase = i
for k in range(i + 1 , snake_case__ ):
if collection[k] < collection[least]:
lowerCAmelCase = k
if least != i:
lowerCAmelCase , lowerCAmelCase = (collection[i], collection[least])
return collection
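# Classic in-place selection sort: for each index i, find the minimum of the
# unsorted suffix and swap it into place. O(n^2) comparisons, O(1) extra space.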
if __name__ == "__main__":
lowercase__ : Optional[int] = input('''Enter numbers separated by a comma:\n''').strip()
lowercase__ : str = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
| 338 | 0 |
"""simple docstring"""
import math
from collections.abc import Callable
def snake_case_ ( A_ : Callable[[float], float], A_ : float, A_ : float ):
    '''Iterate the secant update until successive estimates agree to within 1e-5.'''
_lowerCamelCase : float = xa
_lowerCamelCase : float = xa
while True:
if x_n == x_na or function(A_ ) == function(A_ ):
raise ZeroDivisionError('''float division by zero, could not find root''' )
_lowerCamelCase : float = x_na - (
function(A_ ) / ((function(A_ ) - function(A_ )) / (x_na - x_n))
)
if abs(x_na - x_na ) < 10**-5:
return x_na
_lowerCamelCase : int = x_na
_lowerCamelCase : List[Any] = x_na
def snake_case_ ( A_ : float ):
'''simple docstring'''
return math.pow(A_, 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
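# Reference: the intended secant update is
#   x_{n+2} = x_{n+1} - f(x_{n+1}) * (x_{n+1} - x_n) / (f(x_{n+1}) - f(x_n)),
# which, for f(x) = x^3 - 2x - 5 and starting points 3 and 3.5, converges to
# the real root near 2.0945515.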
| 72 | import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
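# These tests build a tiny two-block ESMFold trunk; most shared transformers
# model tests are skipped below because ESMFold does not follow the standard
# model API (no attention outputs, no embedding resizing, and so on).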
class lowercase_ :
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=19 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.0_2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , ) ->Union[str, Any]:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__SCREAMING_SNAKE_CASE , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , )
return config
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Tuple:
lowerCAmelCase = EsmForProteinFolding(config=__SCREAMING_SNAKE_CASE ).float()
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
lowerCAmelCase = self.prepare_config_and_inputs()
        (
            lowerCAmelCase,
            lowerCAmelCase,
            lowerCAmelCase,
            lowerCAmelCase,
            lowerCAmelCase,
            lowerCAmelCase,
        ) = config_and_inputs
lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowercase_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : Dict = (EsmForProteinFolding,) if is_torch_available() else ()
UpperCAmelCase_ : List[Any] = ()
UpperCAmelCase_ : Tuple = {} if is_torch_available() else {}
UpperCAmelCase_ : List[str] = False
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
lowerCAmelCase = EsmFoldModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
@unittest.skip('''Does not support attention outputs''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
pass
@unittest.skip
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
pass
@unittest.skip('''ESMFold only has one output format.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
pass
@require_torch
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
lowerCAmelCase = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float()
model.eval()
lowerCAmelCase = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )['''positions''']
lowerCAmelCase = torch.tensor([2.5_8_2_8, 0.7_9_9_3, -1_0.9_3_3_4] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 338 | 0 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__=None , lowerCamelCase__=None ) -> Any:
return field(default_factory=lambda: default , metadata=lowerCamelCase__ )
@dataclass
class A_ :
_UpperCAmelCase : str = field(
metadata={'''help''': '''The csv file to plot.'''} , )
_UpperCAmelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to plot along batch size or sequence length. Defaults to sequence length.'''} , )
_UpperCAmelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether the csv file has time results or memory results. Defaults to memory results.'''} , )
_UpperCAmelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Disable logarithmic scale when plotting'''} , )
_UpperCAmelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': '''Whether the csv file has training results or inference results. Defaults to inference results.'''
} , )
_UpperCAmelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Filename under which the plot will be saved. If unused no plot is saved.'''} , )
_UpperCAmelCase : Optional[List[str]] = list_field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''List of model names that are used instead of the ones in the csv file.'''} )
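# The benchmark CSV stores memory results as ints and time results as floats;
# the two helpers below decide which parse applies to each "result" cell.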
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any:
try:
int(lowerCamelCase__ )
return True
except ValueError:
return False
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> List[Any]:
try:
float(lowerCamelCase__ )
return True
except ValueError:
return False
class A_ :
def __init__( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : Dict):
__lowerCamelCase : str = args
__lowerCamelCase : Any = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})
with open(self.args.csv_file ,newline='') as csv_file:
__lowerCamelCase : Any = csv.DictReader(SCREAMING_SNAKE_CASE__)
for row in reader:
__lowerCamelCase : Any = row['model']
self.result_dict[model_name]["bsz"].append(int(row['batch_size']))
self.result_dict[model_name]["seq_len"].append(int(row['sequence_length']))
if can_convert_to_int(row['result']):
# value is not None
__lowerCamelCase : Tuple = int(row['result'])
elif can_convert_to_float(row['result']):
# value is not None
__lowerCamelCase : List[Any] = float(row['result'])
def lowerCAmelCase ( self : List[str]):
        __lowerCamelCase , __lowerCamelCase = plt.subplots()
__lowerCamelCase : Union[str, Any] = 'Time usage' if self.args.is_time else 'Memory usage'
__lowerCamelCase : Optional[Any] = title_str + ' for training' if self.args.is_train else title_str + ' for inference'
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('log')
ax.set_yscale('log')
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter())
for model_name_idx, model_name in enumerate(self.result_dict.keys()):
__lowerCamelCase : int = sorted(set(self.result_dict[model_name]['bsz']))
__lowerCamelCase : Tuple = sorted(set(self.result_dict[model_name]['seq_len']))
__lowerCamelCase : int = self.result_dict[model_name]['result']
            ((__lowerCamelCase) , (__lowerCamelCase)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
__lowerCamelCase : List[str] = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
__lowerCamelCase : int = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] ,dtype=int ,)
else:
__lowerCamelCase : Union[str, Any] = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] ,dtype=np.floataa ,)
                ((__lowerCamelCase) , (__lowerCamelCase)) = (
('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
)
__lowerCamelCase : Tuple = np.asarray(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)[: len(SCREAMING_SNAKE_CASE__)]
plt.scatter(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,label=F"{label_model_name} - {inner_loop_label}: {inner_loop_value}")
plt.plot(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,'--')
title_str += F" {label_model_name} vs."
__lowerCamelCase : List[Any] = title_str[:-4]
__lowerCamelCase : int = 'Time in s' if self.args.is_time else 'Memory in MB'
# plot
plt.title(SCREAMING_SNAKE_CASE__)
plt.xlabel(SCREAMING_SNAKE_CASE__)
plt.ylabel(SCREAMING_SNAKE_CASE__)
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file)
else:
plt.show()
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
__lowerCamelCase : Optional[Any] = HfArgumentParser(lowerCamelCase__ )
__lowerCamelCase : List[str] = parser.parse_args_into_dataclasses()[0]
__lowerCamelCase : str = Plot(args=lowerCamelCase__ )
plot.plot()
if __name__ == "__main__":
main()
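# Typical invocation (script name illustrative):
#   python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png
# Omitting --figure_png_file shows the plot interactively instead of saving it.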
| 73 | import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
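# OwlViT's processor pairs a CLIP tokenizer with the OwlViT image processor.
# Because each image may carry several text queries, every sample is padded
# below to the batch-wide maximum number of queries so tensors stay rectangular.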
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = ["""image_processor""", """tokenizer"""]
UpperCAmelCase_ : int = """OwlViTImageProcessor"""
UpperCAmelCase_ : Any = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->Any:
lowerCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __SCREAMING_SNAKE_CASE , )
lowerCAmelCase = kwargs.pop('''feature_extractor''' )
lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="max_length" , __SCREAMING_SNAKE_CASE="np" , **__SCREAMING_SNAKE_CASE ) ->int:
if text is None and query_images is None and images is None:
raise ValueError(
'''You have to specify at least one text or query image or image. All three cannot be none.''' )
if text is not None:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or (isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not isinstance(text[0] , __SCREAMING_SNAKE_CASE )):
lowerCAmelCase = [self.tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )]
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(text[0] , __SCREAMING_SNAKE_CASE ):
lowerCAmelCase = []
# Maximum number of queries across batch
lowerCAmelCase = max([len(__SCREAMING_SNAKE_CASE ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__SCREAMING_SNAKE_CASE ) != max_num_queries:
lowerCAmelCase = t + [''' '''] * (max_num_queries - len(__SCREAMING_SNAKE_CASE ))
lowerCAmelCase = self.tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
encodings.append(__SCREAMING_SNAKE_CASE )
else:
raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' )
if return_tensors == "np":
lowerCAmelCase = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
lowerCAmelCase = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
lowerCAmelCase = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
lowerCAmelCase = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
lowerCAmelCase = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 )
lowerCAmelCase = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
lowerCAmelCase = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
lowerCAmelCase = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
else:
raise ValueError('''Target return tensor type could not be returned''' )
lowerCAmelCase = BatchEncoding()
lowerCAmelCase = input_ids
lowerCAmelCase = attention_mask
if query_images is not None:
lowerCAmelCase = BatchEncoding()
lowerCAmelCase = self.image_processor(
__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).pixel_values
lowerCAmelCase = query_pixel_values
if images is not None:
lowerCAmelCase = self.image_processor(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if text is not None and images is not None:
lowerCAmelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
lowerCAmelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__SCREAMING_SNAKE_CASE ) , tensor_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Optional[int]:
return self.image_processor.post_process(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Any:
return self.image_processor.post_process_object_detection(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Tuple:
return self.image_processor.post_process_image_guided_detection(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->str:
return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __SCREAMING_SNAKE_CASE , )
return self.image_processor
| 338 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_lowercase = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 74 | import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {'''vocab_file''': '''spiece.model'''}
lowercase__ : Optional[int] = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
lowercase__ : Any = {
'''albert-base-v1''': 5_1_2,
'''albert-large-v1''': 5_1_2,
'''albert-xlarge-v1''': 5_1_2,
'''albert-xxlarge-v1''': 5_1_2,
'''albert-base-v2''': 5_1_2,
'''albert-large-v2''': 5_1_2,
'''albert-xlarge-v2''': 5_1_2,
'''albert-xxlarge-v2''': 5_1_2,
}
lowercase__ : Tuple = '''▁'''
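# ALBERT tokenizes with a SentencePiece unigram model; "▁" (U+2581) marks a
# word boundary inside SentencePiece pieces.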
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = VOCAB_FILES_NAMES
UpperCAmelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) ->None:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
lowerCAmelCase = (
AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE , normalized=__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else mask_token
)
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = do_lower_case
lowerCAmelCase = remove_space
lowerCAmelCase = keep_accents
lowerCAmelCase = vocab_file
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
return len(self.sp_model )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
lowerCAmelCase = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) ->int:
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ) ->Tuple:
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Any:
if self.remove_space:
lowerCAmelCase = ''' '''.join(inputs.strip().split() )
else:
lowerCAmelCase = inputs
lowerCAmelCase = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
lowerCAmelCase = unicodedata.normalize('''NFKD''' , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = ''''''.join([c for c in outputs if not unicodedata.combining(__SCREAMING_SNAKE_CASE )] )
if self.do_lower_case:
lowerCAmelCase = outputs.lower()
return outputs
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->List[str]:
lowerCAmelCase = self.preprocess_text(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = []
for piece in pieces:
if len(__SCREAMING_SNAKE_CASE ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(__SCREAMING_SNAKE_CASE , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCAmelCase = cur_pieces[1:]
else:
lowerCAmelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__SCREAMING_SNAKE_CASE )
else:
new_pieces.append(__SCREAMING_SNAKE_CASE )
return new_pieces
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->int:
return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->int:
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Optional[int]:
lowerCAmelCase = []
lowerCAmelCase = ''''''
lowerCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
lowerCAmelCase = True
lowerCAmelCase = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = False
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ) ->List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is not None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->Tuple[str]:
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCAmelCase = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 338 | 0 |
'''Convert a T5X Pix2Struct checkpoint to the Hugging Face format.'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def a_ ( __snake_case : Any ) -> int:
"""simple docstring"""
lowerCamelCase_ =checkpoints.load_tax_checkpoint(__snake_case )
lowerCamelCase_ =flatten_dict(__snake_case )
return flax_params
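# The renaming pass below maps flattened T5X parameter names onto the Hugging
# Face Pix2Struct layout; weight kernels are transposed when converted to
# torch tensors, while embeddings/embedders are copied as-is.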
def a_ ( __snake_case : Dict ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ ={}
lowerCamelCase_ ={
'''token_embedder''': '''embeddings''',
'''encoder_norm''': '''layernorm''',
'''kernel''': '''weight''',
'''.out''': '''.output''',
'''scale''': '''weight''',
'''embedders_0.pos_embedding''': '''row_embedder.weight''',
'''embedders_1.pos_embedding''': '''column_embedder.weight''',
}
lowerCamelCase_ ={
'''query''': '''attention.query''',
'''key''': '''attention.key''',
'''value''': '''attention.value''',
'''output.dense''': '''output''',
'''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
'''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
'''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
'''mlp.''': '''mlp.DenseReluDense.''',
'''pre_mlp_layer_norm''': '''mlp.layer_norm''',
'''self_attention.o''': '''self_attention.attention.o''',
'''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
'''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
'''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
lowerCamelCase_ ='''.'''.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
lowerCamelCase_ =new_key.replace(__snake_case , __snake_case )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
lowerCamelCase_ =new_key.replace(__snake_case , __snake_case )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
lowerCamelCase_ =re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , __snake_case )
lowerCamelCase_ =new_key.replace('''encoder''' , '''encoder.encoder''' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
lowerCamelCase_ =re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , __snake_case )
lowerCamelCase_ =flax_dict[key]
lowerCamelCase_ ={}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
lowerCamelCase_ =torch.from_numpy(converted_dict[key].T )
else:
lowerCamelCase_ =torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def a_ ( __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Any=False , __snake_case : Optional[int]=False ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =get_flax_param(__snake_case )
if not use_large:
lowerCamelCase_ =PixaStructVisionConfig()
lowerCamelCase_ =PixaStructTextConfig()
else:
lowerCamelCase_ =PixaStructVisionConfig(
hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
lowerCamelCase_ =PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
lowerCamelCase_ =PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__snake_case )
lowerCamelCase_ =PixaStructForConditionalGeneration(__snake_case )
lowerCamelCase_ =rename_and_convert_flax_params(__snake_case )
model.load_state_dict(__snake_case )
lowerCamelCase_ =AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' )
lowerCamelCase_ =PixaStructImageProcessor()
lowerCamelCase_ =PixaStructProcessor(image_processor=__snake_case , tokenizer=__snake_case )
if use_large:
lowerCamelCase_ =4096
lowerCamelCase_ =True
# mkdir if needed
os.makedirs(__snake_case , exist_ok=__snake_case )
model.save_pretrained(__snake_case )
processor.save_pretrained(__snake_case )
print('''Model saved in {}'''.format(__snake_case ) )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Whether the checkpoint is a VQA model.")
    args = parser.parse_args()
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
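# Hedged example invocation (hypothetical paths; mirrors the argparse flags above):
#
#   python convert_pix2struct_checkpoint.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-base-converted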
| 75 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**kwargs)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
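# Hedged sketch of the config round-trip that test_switch exercises above:
# in diffusers, these multistep schedulers can be rebuilt from one another's config.
#
#   scheduler = DEISMultistepScheduler(num_train_timesteps=1000)
#   scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
#   scheduler = DEISMultistepScheduler.from_config(scheduler.config)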
| 338 | 0 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = 'https://openaipublic.azureedge.net/jukebox/models/'
MODEL_MAPPING = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
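# Quick illustration of the renaming above on a hypothetical key:
#   replace_key("priors.0.prime_state_ln.weight")
#   -> "priors.0.encoder.final_layer_norm.weight"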
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
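# Hedged example (hypothetical; downloads several large checkpoints on first run):
#   convert_openai_checkpoint("jukebox-1b-lyrics", "converted-jukebox-1b-lyrics")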
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 76 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
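# Minimal usage sketch of the pipeline exercised above (model id from the slow test):
#   unet = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256")
#   pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
#   image = pipe(num_inference_steps=20, output_type="numpy").images[0]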
| 338 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
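# Hedged sketch of using the processor outside the test harness:
#   image_processor = DetaImageProcessor()
#   encoding = image_processor(images=image, annotations=target, return_tensors="pt")
#   encoding["pixel_values"].shape  # torch.Size([1, 3, 800, 1066]) for the COCO sample above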
| 77 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS, r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index
    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)
    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample

            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids
    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
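# Hedged usage sketch of this pipeline through the high-level factory:
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="bert-base-uncased")
#   unmasker("Paris is the [MASK] of France.", top_k=3, targets=["capital"])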
| 338 | 0 |
"""simple docstring"""
from ... import PretrainedConfig
snake_case_ = {
"""sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""",
}
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
__UpperCamelCase = """nezha"""
def __init__( self :List[Any] , lowercase_ :Optional[Any]=2_11_28 , lowercase_ :List[str]=7_68 , lowercase_ :List[str]=12 , lowercase_ :Dict=12 , lowercase_ :Tuple=30_72 , lowercase_ :Optional[int]="gelu" , lowercase_ :Optional[Any]=0.1 , lowercase_ :List[Any]=0.1 , lowercase_ :List[Any]=5_12 , lowercase_ :Tuple=64 , lowercase_ :str=2 , lowercase_ :Optional[Any]=0.02 , lowercase_ :int=1E-12 , lowercase_ :Any=0.1 , lowercase_ :Optional[int]=0 , lowercase_ :Any=2 , lowercase_ :Dict=3 , lowercase_ :Any=True , **lowercase_ :Tuple , ) -> List[Any]:
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = max_relative_position
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = classifier_dropout
UpperCAmelCase = use_cache
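# Hedged usage sketch of the config above:
#   config = NezhaConfig()
#   config.max_relative_position  # 64
#   config.save_pretrained("./nezha-config")  # standard PretrainedConfig round-trip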
| 78 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
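# Note: the _LazyModule indirection defers the heavy tokenizer import until the
# attribute is first accessed (the standard transformers lazy-init pattern).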
| 338 | 0 |
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """Initialize with the number of items in each set; every set starts with rank 1."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src_set: int, dst_set: int) -> bool:
        """Merge two sets using the union-by-rank heuristic; return True if they were distinct."""
        src_parent = self.get_parent(src_set)
        dst_parent = self.get_parent(dst_set)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the representative of a set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
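# Quick sketch: three singleton sets; merging two of them grows max_set to 2.
#   ds = DisjointSet([1, 1, 1])
#   ds.merge(0, 1)  # returns True
#   ds.max_set      # 2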
| 79 |
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
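# Worked example with the classic Vigenere test vector (key "LEMON"):
#   assert encrypt_message("LEMON", "ATTACKATDAWN") == "LXFOPVEFRNHR"
#   assert decrypt_message("LEMON", "LXFOPVEFRNHR") == "ATTACKATDAWN"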
| 338 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main() -> None:
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
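# The dispatch above backs the `accelerate` console entry point, e.g.
#   accelerate env     -> runs the handler registered by env_command_parser
#   accelerate launch  -> runs the handler registered by launch_command_parser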
| 80 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1

        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(f'{solution() = }')
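# Sanity check of the counting scheme above: a 3x3 frame with a 1x1 hole uses
# 9 - 1 = 8 tiles, so count[8] is incremented exactly once (outer_width=3, hole_width=1).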
| 338 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 81 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_textdatasetdict_reader_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_textdatasetdict_reader_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_textdatasetdict_reader_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
| 338 | 0 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**kwargs)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
def snake_case ( self , _snake_case=0 , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase = dict(self.forward_default_kwargs )
_lowerCAmelCase = kwargs.pop("""num_inference_steps""" , _snake_case )
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
_lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_snake_case )
scheduler.set_timesteps(_snake_case )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case )
_lowerCAmelCase = scheduler_class.from_pretrained(_snake_case )
                # restore the timestep schedule on the reloaded scheduler
new_scheduler.set_timesteps(_snake_case )
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase = dummy_past_residuals[:]
_lowerCAmelCase = scheduler.step_prk(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
_lowerCAmelCase = new_scheduler.step_prk(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_lowerCAmelCase = scheduler.step_plms(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
_lowerCAmelCase = new_scheduler.step_plms(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case ( self , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(**_snake_case )
_lowerCAmelCase = scheduler_class(**_snake_case )
_lowerCAmelCase = 10
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(_snake_case )
for i, t in enumerate(scheduler.prk_timesteps ):
_lowerCAmelCase = model(_snake_case , _snake_case )
_lowerCAmelCase = scheduler.step_prk(_snake_case , _snake_case , _snake_case ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
_lowerCAmelCase = model(_snake_case , _snake_case )
_lowerCAmelCase = scheduler.step_plms(_snake_case , _snake_case , _snake_case ).prev_sample
return sample
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = dict(self.forward_default_kwargs )
_lowerCAmelCase = kwargs.pop("""num_inference_steps""" , _snake_case )
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_snake_case )
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(_snake_case , """set_timesteps""" ):
scheduler.set_timesteps(_snake_case )
elif num_inference_steps is not None and not hasattr(_snake_case , """set_timesteps""" ):
_lowerCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_lowerCAmelCase = dummy_past_residuals[:]
_lowerCAmelCase = scheduler.step_prk(_snake_case , 0 , _snake_case , **_snake_case ).prev_sample
_lowerCAmelCase = scheduler.step_prk(_snake_case , 1 , _snake_case , **_snake_case ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
_lowerCAmelCase = scheduler.step_plms(_snake_case , 0 , _snake_case , **_snake_case ).prev_sample
_lowerCAmelCase = scheduler.step_plms(_snake_case , 1 , _snake_case , **_snake_case ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case ( self ):
"""simple docstring"""
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_snake_case )
def snake_case ( self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_snake_case )
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(steps_offset=1 )
_lowerCAmelCase = scheduler_class(**_snake_case )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def snake_case ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=_snake_case , beta_end=_snake_case )
def snake_case ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_snake_case )
def snake_case ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_snake_case )
def snake_case ( self ):
"""simple docstring"""
for t in [1, 5, 10]:
self.check_over_forward(time_step=_snake_case )
def snake_case ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = 27
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_snake_case )
scheduler.set_timesteps(_snake_case )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
_lowerCAmelCase = scheduler.step_prk(_snake_case , _snake_case , _snake_case ).prev_sample
def snake_case ( self ):
"""simple docstring"""
with self.assertRaises(_snake_case ):
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_snake_case )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.full_loop()
_lowerCAmelCase = torch.sum(torch.abs(_snake_case ) )
_lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 198.1318 ) < 1e-2
assert abs(result_mean.item() - 0.2580 ) < 1e-3
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.full_loop(prediction_type="""v_prediction""" )
_lowerCAmelCase = torch.sum(torch.abs(_snake_case ) )
_lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 67.3986 ) < 1e-2
assert abs(result_mean.item() - 0.0878 ) < 1e-3
    def snake_case ( self ):
        """simple docstring"""
        # first alpha forced to one (set_alpha_to_one=True upstream)
        _lowerCAmelCase = self.full_loop(set_alpha_to_one=True , beta_start=0.01 )
        _lowerCAmelCase = torch.sum(torch.abs(_snake_case ) )
        _lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
        assert abs(result_sum.item() - 230.0399 ) < 1e-2
        assert abs(result_mean.item() - 0.2995 ) < 1e-3
    def snake_case ( self ):
        """simple docstring"""
        # same loop without forcing the first alpha to one (set_alpha_to_one=False)
        _lowerCAmelCase = self.full_loop(set_alpha_to_one=False , beta_start=0.01 )
        _lowerCAmelCase = torch.sum(torch.abs(_snake_case ) )
        _lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
        assert abs(result_sum.item() - 186.9482 ) < 1e-2
        assert abs(result_mean.item() - 0.2434 ) < 1e-3
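# The stepping pattern the tests above exercise, as a hedged sketch (the
# model call and tensor shapes are illustrative, not part of the tests):
# scheduler = PNDMScheduler(num_train_timesteps=1000)
# scheduler.set_timesteps(50)
# for t in scheduler.prk_timesteps:    # Runge-Kutta warm-up steps
#     sample = scheduler.step_prk(model(sample, t), t, sample).prev_sample
# for t in scheduler.plms_timesteps:   # linear multistep steps
#     sample = scheduler.step_plms(model(sample, t), t, sample).prev_sample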
| 82 |
def SCREAMING_SNAKE_CASE_ ( num ) -> str:
    if isinstance(num , float ):
        raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
    if isinstance(num , str ):
        raise TypeError('''\'str\' object cannot be interpreted as an integer''' )
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
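# Example outputs (the converter mirrors the built-in `bin`, including the sign):
# SCREAMING_SNAKE_CASE_(10)  -> "0b1010"
# SCREAMING_SNAKE_CASE_(-10) -> "-0b1010"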
| 338 | 0 |
'''simple docstring'''
def solution( length = 5_0 ):
    ways_number = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
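# Recurrence behind the loops above: a length-n row is either all unit squares
# (the initial 1) or places its first long tile (length t in 2..4) at offset s,
# leaving a sub-row of length n - s - t to fill freely:
#   ways[n] = 1 + sum_{t=2..4} sum_{s=0..n-t} ways[n - s - t]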
| 83 |
from typing import Any, List, Optional, Tuple, Union
class Things:
    """A menu item with a name, a value and a weight."""
    def __init__( self , name , value , weight ) ->Any:
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__( self ) ->str:
        return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
    def get_value( self ) ->List[Any]:
        return self.value
    def get_name( self ) ->int:
        return self.name
    def get_weight( self ) ->Union[str, Any]:
        return self.weight
    def value_weight( self ) ->Tuple:
        return self.value / self.weight
def build_menu( name , value , weight ) -> int:
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu
def greedy( items , max_cost , key_func ) -> Optional[int]:
    items_copy = sorted(items , key=key_func , reverse=True )
    result = []
    total_value , total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def test_greedy( ) -> Optional[int]:
    pass
if __name__ == "__main__":
    import doctest
    doctest.testmod()
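# Usage sketch (the menu data is hypothetical):
# names = ["Burger", "Pizza", "Coca Cola"]
# values = [80, 100, 60]
# weights = [40, 60, 40]
# foods = build_menu(names, values, weights)
# print(greedy(foods, 140.0, Things.get_value))  # -> (chosen items, total value)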
| 338 | 0 |
"""simple docstring"""
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness( check_program , timeout , task_id , completion_id ):
    '''simple docstring'''
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append("""timed out""" )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def unsafe_execute( check_program , result , timeout ):
    '''simple docstring'''
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil
        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout ):
                    exec(check_program , exec_globals )
            result.append("""passed""" )
        except TimeoutException:
            result.append("""timed out""" )
        except BaseException as e:
            result.append(f"""failed: {e}""" )
        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit( seconds ):
    '''simple docstring'''
    def signal_handler(signum , frame ):
        raise TimeoutException("""Timed out!""" )
    signal.setitimer(signal.ITIMER_REAL , seconds )
    signal.signal(signal.SIGALRM , signal_handler )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def swallow_io( ):
    '''simple docstring'''
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream ):
        with contextlib.redirect_stderr(stream ):
            with redirect_stdin(stream ):
                yield
@contextlib.contextmanager
def create_tempdir( ):
    '''simple docstring'''
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname ):
            yield dirname
class TimeoutException( Exception ):
pass
class WriteOnlyStringIO( io.StringIO ):
    def read( self , *args , **kwargs ):
        raise OSError
    def readline( self , *args , **kwargs ):
        raise OSError
    def readlines( self , *args , **kwargs ):
        raise OSError
    def readable( self , *args , **kwargs ):
        return False
class redirect_stdin( contextlib._RedirectStream ): # type: ignore
    _stream = """stdin"""
@contextlib.contextmanager
def chdir( root ):
    '''simple docstring'''
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root )
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd )
def reliability_guard( maximum_memory_bytes=None ):
'''simple docstring'''
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
    builtins.exit = None
    builtins.quit = None
    import os
    os.environ["""OMP_NUM_THREADS"""] = """1"""
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None
    import shutil
    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None
    import subprocess
    subprocess.Popen = None # type: ignore
    __builtins__["""help"""] = None
    import sys
    sys.modules["""ipdb"""] = None
    sys.modules["""joblib"""] = None
    sys.modules["""resource"""] = None
    sys.modules["""psutil"""] = None
    sys.modules["""tkinter"""] = None
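# Usage sketch for check_correctness above (`problem` and `completion` are
# hypothetical human-eval-style inputs, not part of this module):
# check_program = problem["prompt"] + completion + "\n" + problem["test"]
# out = check_correctness(check_program, timeout=3.0, task_id=problem["task_id"], completion_id=0)
# # out -> {"task_id": ..., "passed": bool, "result": "passed" or an error string, "completion_id": 0}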
| 84 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
lowercase__ : Dict = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
lowercase__ : Optional[int] = [0, 2_5, 5_0]
lowercase__ : Union[str, Any] = [2_5, 5_0, 7_5]
lowercase__ : int = fuzz.membership.trimf(X, abca)
lowercase__ : Tuple = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
lowercase__ : List[str] = np.ones(7_5)
lowercase__ : Any = np.zeros((7_5,))
# 1. Union = max(µA(x), µB(x))
lowercase__ : Union[str, Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
lowercase__ : int = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
lowercase__ : Union[str, Any] = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
lowercase__ : Optional[int] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
lowercase__ : Any = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
lowercase__ : str = young * middle_aged
    # 7. Bounded Sum = min[1, µA(x) + µB(x)]
lowercase__ : Tuple = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, µA(x) - µB(x)]
lowercase__ : Tuple = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
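    # A minimal numpy sketch of both compositions (R1 and R2 are hypothetical
    # fuzzy relation matrices; skfuzzy also ships fuzz.maxmin_composition and
    # fuzz.maxprod_composition for the same purpose):
    # R1 = np.array([[0.3, 0.7], [0.9, 0.4]])
    # R2 = np.array([[0.6, 0.2], [0.5, 0.8]])
    # maxmin = np.max(np.minimum(R1[:, :, None], R2[None, :, :]), axis=1)
    # maxprod = np.max(R1[:, :, None] * R2[None, :, :], axis=1)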
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 338 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
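# Illustrative note (not part of the original module): the _LazyModule
# registration above keeps `import transformers.models.clip` cheap - framework
# sub-modules load only on first attribute access, e.g.:
# from transformers.models.clip import CLIPTextConfig  # no torch import yet
# from transformers.models.clip import CLIPModel       # imports the torch branch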
| 85 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase_ ( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (DDPMScheduler,)
def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->Optional[Any]:
lowerCAmelCase = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**__SCREAMING_SNAKE_CASE )
return config
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase = pred_prev_sample
lowerCAmelCase = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase = pred_prev_sample
lowerCAmelCase = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler.timesteps
for i, timestep in enumerate(__SCREAMING_SNAKE_CASE ):
if i == len(__SCREAMING_SNAKE_CASE ) - 1:
lowerCAmelCase = -1
else:
lowerCAmelCase = timesteps[i + 1]
lowerCAmelCase = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = prev_t.item()
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = [100, 87, 50, 51, 0]
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = [100, 87, 50, 1, 0]
lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            __SCREAMING_SNAKE_CASE , msg=F"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
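# The denoising pattern the two full-loop tests above exercise, as a hedged
# sketch (the model call and tensor shapes are illustrative):
# scheduler = DDPMScheduler(num_train_timesteps=1000)
# sample = torch.randn(1, 3, 32, 32)
# for t in scheduler.timesteps:
#     residual = model(sample, t)  # predicted noise
#     sample = scheduler.step(residual, t, sample, generator=None).prev_sample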
| 338 | 0 |
"""simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class A__ ( Pipeline):
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
requires_backends(self , 'decord' )
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ):
__lowerCAmelCase : Union[str, Any] = {}
if frame_sampling_rate is not None:
__lowerCAmelCase : Optional[int] = frame_sampling_rate
if num_frames is not None:
__lowerCAmelCase : int = num_frames
__lowerCAmelCase : Any = {}
if top_k is not None:
__lowerCAmelCase : Optional[Any] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=1 ):
if num_frames is None:
__lowerCAmelCase : Union[str, Any] = self.model.config.num_frames
if video.startswith('http://' ) or video.startswith('https://' ):
__lowerCAmelCase : Tuple = BytesIO(requests.get(_SCREAMING_SNAKE_CASE ).content )
__lowerCAmelCase : str = VideoReader(_SCREAMING_SNAKE_CASE )
videoreader.seek(0 )
__lowerCAmelCase : Union[str, Any] = 0
__lowerCAmelCase : str = num_frames * frame_sampling_rate - 1
        __lowerCAmelCase : Union[str, Any] = np.linspace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num=_SCREAMING_SNAKE_CASE , dtype=np.int64 )
__lowerCAmelCase : int = videoreader.get_batch(_SCREAMING_SNAKE_CASE ).asnumpy()
__lowerCAmelCase : Dict = list(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
return model_inputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = self.model(**_SCREAMING_SNAKE_CASE )
return model_outputs
    def __lowerCamelCase ( self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        else:
            raise ValueError(f"Unsupported framework: {self.framework}" )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 86 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class lowercase_ ( ProcessorMixin ):
"""simple docstring"""
    tokenizer_class = """AutoTokenizer"""
    attributes = ["""tokenizer"""]
    preset_shape = {
        """semantic_prompt""": 1,
        """coarse_prompt""": 2,
        """fine_prompt""": 2,
    }
    def __init__( self , tokenizer , speaker_embeddings=None ):
        super().__init__(tokenizer )
        self.speaker_embeddings = speaker_embeddings
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , **__SCREAMING_SNAKE_CASE ) ->Tuple:
if speaker_embeddings_dict_path is not None:
lowerCAmelCase = get_file_from_repo(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , subfolder=kwargs.pop('''subfolder''' , __SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop('''cache_dir''' , __SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop('''force_download''' , __SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop('''proxies''' , __SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop('''resume_download''' , __SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop('''local_files_only''' , __SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop('''use_auth_token''' , __SCREAMING_SNAKE_CASE ) , revision=kwargs.pop('''revision''' , __SCREAMING_SNAKE_CASE ) , )
if speaker_embeddings_path is None:
logger.warning(
F"`{os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`." )
lowerCAmelCase = None
else:
with open(__SCREAMING_SNAKE_CASE ) as speaker_embeddings_json:
lowerCAmelCase = json.load(__SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase = None
lowerCAmelCase = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
return cls(tokenizer=__SCREAMING_SNAKE_CASE , speaker_embeddings=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , __SCREAMING_SNAKE_CASE="speaker_embeddings" , __SCREAMING_SNAKE_CASE = False , **__SCREAMING_SNAKE_CASE , ) ->int:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''v2''' ) , exist_ok=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = {}
lowerCAmelCase = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
lowerCAmelCase = self._load_voice_preset(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['''repo_or_path'''] , __SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = os.path.join(__SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}.npy" )
lowerCAmelCase = tmp_dict
with open(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , '''w''' ) as fp:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
super().save_pretrained(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE ) ->List[str]:
lowerCAmelCase = self.speaker_embeddings[voice_preset]
lowerCAmelCase = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." )
lowerCAmelCase = get_file_from_repo(
self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , __SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop('''cache_dir''' , __SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop('''force_download''' , __SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop('''proxies''' , __SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop('''resume_download''' , __SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop('''local_files_only''' , __SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop('''use_auth_token''' , __SCREAMING_SNAKE_CASE ) , revision=kwargs.pop('''revision''' , __SCREAMING_SNAKE_CASE ) , )
if path is None:
raise ValueError(
F"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." )
lowerCAmelCase = np.load(__SCREAMING_SNAKE_CASE )
return voice_preset_dict
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE = None ) ->Tuple:
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"Voice preset unrecognized, missing {key} as a key." )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="pt" , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ) ->int:
if voice_preset is not None and not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
if (
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
lowerCAmelCase = self._load_voice_preset(__SCREAMING_SNAKE_CASE )
else:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not voice_preset.endswith('''.npz''' ):
lowerCAmelCase = voice_preset + '''.npz'''
lowerCAmelCase = np.load(__SCREAMING_SNAKE_CASE )
if voice_preset is not None:
self._validate_voice_preset_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowerCAmelCase = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.tokenizer(
__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
if voice_preset is not None:
lowerCAmelCase = voice_preset
return encoded_text
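# Usage sketch for the processor above (upstream it is BarkProcessor; the
# checkpoint and voice preset ids come from the Bark release and are
# illustrative here):
# processor = BarkProcessor.from_pretrained("suno/bark-small")
# inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")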
| 338 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class snake_case_ ( PipelineTool ):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    inputs = ["text"]
    outputs = ["text"]
def __UpperCamelCase ( self : List[Any] , lowercase_ : Optional[int] ) -> str:
return self.pre_processor(lowercase_ , return_tensors="pt" , truncation=lowercase_ )
def __UpperCamelCase ( self : List[str] , lowercase_ : List[str] ) -> str:
return self.model.generate(**lowercase_ )[0]
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Dict ) -> Optional[Any]:
return self.pre_processor.decode(lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
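# Usage sketch (illustrative; upstream this tool is TextSummarizationTool and
# the agents framework usually instantiates it via its "summarizer" name):
# tool = snake_case_()
# print(tool("A long English text to compress into a few sentences ..."))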
| 87 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 338 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ):
        """simple docstring"""
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict( self ):
"""simple docstring"""
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self )
@property
    def image_processor_dict( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """size""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """apply_ocr""" ) )
def _lowercase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__magic_name__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
__magic_name__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def _lowercase ( self : List[str] ) -> Any:
"""simple docstring"""
pass
def _lowercase ( self : str ) -> List[str]:
"""simple docstring"""
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , UpperCamelCase__ )
self.assertIsInstance(encoding.boxes , UpperCamelCase__ )
# Test batched
__magic_name__ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def _lowercase ( self : Dict ) -> List[Any]:
"""simple docstring"""
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__magic_name__ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def _lowercase ( self : Dict ) -> Tuple:
"""simple docstring"""
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__magic_name__ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def _lowercase ( self : int ) -> Optional[int]:
"""simple docstring"""
        image_processing = LayoutLMv3ImageProcessor()
        from datasets import load_dataset
        ds = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
        image = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
        encoding = image_processing(image , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__magic_name__ = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
__magic_name__ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCamelCase__ )
self.assertListEqual(encoding.boxes , UpperCamelCase__ )
# with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False )
        encoding = image_processing(image , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
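# Behaviour exercised above, as a short sketch (`image` is illustrative):
# processor = LayoutLMv3ImageProcessor()          # apply_ocr=True by default
# enc = processor(image, return_tensors="pt")     # pixel_values + words + boxes
# processor = LayoutLMv3ImageProcessor(apply_ocr=False)
# enc = processor(image, return_tensors="pt")     # pixel_values only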
| 88 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''' ) )
        check_copies.TRANSFORMER_PATH = self.transformer_dir
shutil.copy(
os.path.join(__SCREAMING_SNAKE_CASE , '''src/transformers/models/bert/modeling_bert.py''' ) , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''' ) , )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
        check_copies.TRANSFORMER_PATH = '''src/transformers'''
shutil.rmtree(self.transformer_dir )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Union[str, Any]:
lowerCAmelCase = comment + F"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
lowerCAmelCase = comment + F"\nclass {class_name}(nn.Module):\n" + overwrite_result
lowerCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
lowerCAmelCase = black.format_str(__SCREAMING_SNAKE_CASE , mode=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = os.path.join(self.transformer_dir , '''new_code.py''' )
with open(__SCREAMING_SNAKE_CASE , '''w''' , newline='''\n''' ) as f:
f.write(__SCREAMING_SNAKE_CASE )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__SCREAMING_SNAKE_CASE ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=__SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE , '''r''' ) as f:
self.assertTrue(f.read() , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
lowerCAmelCase = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' )
        self.assertEqual(lowerCAmelCase , REFERENCE_CODE )
    def test_is_copy_consistent( self ) ->Tuple:
        # Base copy consistency
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , )
        # With no empty line at the end
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , '''TestModel''' , REFERENCE_CODE ) , )
        # Copy consistency with a really long name
        long_class_name = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            F"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" , F"{long_class_name}LMPredictionHead" , re.sub('''Bert''' , long_class_name , REFERENCE_CODE ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , REFERENCE_CODE , overwrite_result=re.sub('''Bert''' , '''TestModel''' , REFERENCE_CODE ) , )
    def test_convert_to_localized_md( self ) ->Tuple:
        localized_readme = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
        md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
        localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            md_list , localized_md_list , localized_readme['''format_model_list'''] )
        self.assertFalse(num_models_equal )
        self.assertEqual(converted_md_list , converted_md_list_sample )
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            md_list , converted_md_list , localized_readme['''format_model_list'''] )
# Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal )
        link_changed_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
        link_unchanged_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list , link_unchanged_md_list , localized_readme['''format_model_list'''] )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list , converted_md_list_sample )
| 338 | 0 |
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : List[str] = 'efficientformer'
    def __init__( self , depths: List[int] = [3, 2, 6, 4] , hidden_sizes: List[int] = [48, 96, 224, 448] , downsamples: List[bool] = [True, True, True, True] , dim: int = 448 , key_dim: int = 32 , attention_ratio: int = 4 , resolution: int = 7 , num_hidden_layers: int = 5 , num_attention_heads: int = 8 , mlp_expansion_ratio: int = 4 , hidden_dropout_prob: float = 0.0 , patch_size: int = 16 , num_channels: int = 3 , pool_size: int = 3 , downsample_patch_size: int = 3 , downsample_stride: int = 2 , downsample_pad: int = 1 , drop_path_rate: float = 0.0 , num_meta3d_blocks: int = 1 , distillation: bool = True , use_layer_scale: bool = True , layer_scale_init_value: float = 1E-5 , hidden_act: str = "gelu" , initializer_range: float = 0.02 , layer_norm_eps: float = 1E-12 , image_size: int = 224 , batch_norm_eps: float = 1E-05 , **kwargs ,):
        super().__init__(**kwargs )
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 89 | import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_3_3_7 , num_examples=4_2 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_3_3_7 , num_examples=4_2 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list( split_dict ) -> Union[str, Any]:
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(split_dict )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'''split_info''' , [SplitInfo(), SplitInfo(dataset_name=snake_case__ ), SplitInfo(dataset_name='''my_dataset''' )] )
def test_split_dict_asdict_has_dataset_name( split_info ) -> Optional[int]:
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({'''train''': split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 338 | 0 |
def lowerCamelCase_ ( min_val: int = 10 , max_val: int = 1000 , option: bool = True ) -> int:
    """simple docstring"""
    assert (
        isinstance(min_val , int )
        and isinstance(max_val , int )
        and isinstance(option , bool )
    ), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError('Invalid value for min_val or max_val (min_value < max_value)' )
return min_val if option else max_val
def get_avg( number_a: int , number_b: int ) -> int:
    """simple docstring"""
    return int((number_a + number_b) / 2 )
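# The guesser below is a binary search on the answer: it repeatedly probes the
# midpoint and halves the remaining interval based on the "high"/"low" feedback.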
def guess_the_number( lower: int , higher: int , to_guess: int ) -> None:
    """simple docstring"""
    assert (
        isinstance(lower , int ) and isinstance(higher , int ) and isinstance(to_guess , int )
    ), 'argument values must be type of "int"'
if lower > higher:
raise ValueError('argument value for lower and higher must be(lower > higher)' )
if not lower < to_guess < higher:
raise ValueError(
'guess value must be within the range of lower and higher value' )
def answer(UpperCamelCase__ : int ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print('started...' )
    last_lowbound = lower
    last_highbound = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowbound , last_highbound )
        last_numbers.append(number )
        if answer(number ) == "low":
            last_lowbound = number
        elif answer(number ) == "high":
            last_highbound = number
        else:
            break
break
print(F"""guess the number : {last_numbers[-1]}""" )
print(F"""details : {last_numbers!s}""" )
def main() -> None:
"""simple docstring"""
    lower = int(input('Enter lower value : ' ).strip() )
    higher = int(input('Enter high value : ' ).strip() )
    to_guess = int(input('Enter value to guess : ' ).strip() )
    guess_the_number(lower , higher , to_guess )
if __name__ == "__main__":
main()
| 90 | import unittest
import numpy as np
def schur_complement( mat_a , mat_b , mat_c , pseudo_inv = None , ) -> np.ndarray:
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )
    if shape_a[0] != shape_b[0]:
        msg = (
            '''Expected the same number of rows for A and B. '''
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg )
    if shape_b[1] != shape_c[1]:
        msg = (
            '''Expected the same number of columns for B and C. '''
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg )
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                '''Input matrix A is not invertible. Cannot compute Schur complement.''' )
    return mat_c - mat_b.T @ a_inv @ mat_b
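# For the block matrix M = [[A, B], [B.T, C]], the Schur complement of A is
# M/A = C - B.T @ A^-1 @ B, and det(M) = det(A) * det(M/A) -- the identity
# that the first test below verifies.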
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
    def test_schur_complement( self ) ->None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        s = schur_complement(a , b , c )
        m = np.block([[a, b], [b.T, c]] )
        det_m = np.linalg.det(m )
        det_a = np.linalg.det(a )
        det_s = np.linalg.det(s )
        self.assertAlmostEqual(det_m , det_a * det_s )
    def test_improper_a_b_dimensions( self ) ->None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
    def test_improper_b_c_dimensions( self ) ->None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 338 | 0 |
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
UpperCAmelCase_ : Any = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
UpperCAmelCase_ : str = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
UpperCAmelCase_ : Dict = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
UpperCAmelCase_ : Tuple = OrderedDict(
[
# Model for Image-classsification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
UpperCAmelCase_ : Union[str, Any] = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
UpperCAmelCase_ : Optional[Any] = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
UpperCAmelCase_ : Dict = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
UpperCAmelCase_ : List[str] = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
UpperCAmelCase_ : Union[str, Any] = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
UpperCAmelCase_ : List[Any] = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
UpperCAmelCase_ : Dict = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
UpperCAmelCase_ : int = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
UpperCAmelCase_ : Optional[int] = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
UpperCAmelCase_ : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
UpperCAmelCase_ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
UpperCAmelCase_ : List[str] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
UpperCAmelCase_ : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
UpperCAmelCase_ : Optional[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
UpperCAmelCase_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
UpperCAmelCase_ : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
UpperCAmelCase_ : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
UpperCAmelCase_ : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
UpperCAmelCase_ : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
UpperCAmelCase_ : Dict = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
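# _LazyAutoMapping resolves a config class to its model class only on first access,
# so importing this module stays cheap even with this many architectures registered.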
class lowerCAmelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_MAPPING
UpperCAmelCase_ : Tuple = auto_class_update(FlaxAutoModel)
class lowerCAmelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING
UpperCAmelCase_ : Union[str, Any] = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""")
class lowerCAmelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
UpperCAmelCase_ : Union[str, Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""")
class lowerCAmelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING
UpperCAmelCase_ : Dict = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""")
class lowerCAmelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase_ : List[str] = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base"""
)
class lowerCAmelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCAmelCase_ : Optional[Any] = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="""sequence classification"""
)
class lowerCAmelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
UpperCAmelCase_ : Optional[Any] = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""")
class lowerCAmelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCAmelCase_ : Optional[Any] = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="""token classification"""
)
class lowerCAmelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
UpperCAmelCase_ : Optional[int] = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""")
class lowerCAmelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
UpperCAmelCase_ : Any = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction"""
)
class lowerCAmelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCAmelCase_ : Dict = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="""image classification"""
)
class lowerCAmelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase_ : Any = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""")
class lowerCAmelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
UpperCAmelCase_ : Optional[int] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling"""
)
| 91 | import argparse
import hashlib
import io
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
lowercase__ : Any = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
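# Each checkpoint URL embeds the file's SHA256 digest as its second-to-last path
# component; _download below re-derives and verifies it. (The "." cache-root default
# used in _download is an assumption of this sketch, not part of the original CLI.)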
def remove_ignore_keys_( state_dict ) -> None:
    ignore_keys = ['''layers''', '''blocks''']
    for k in ignore_keys:
        state_dict.pop(k , None )
lowercase__ : List[Any] = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
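# Example of the substring rewriting applied by rename_keys below: an OpenAI key such as
# "decoder.blocks.0.mlp.0.weight" becomes the Transformers key "decoder.layers.0.fc1.weight".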
def rename_keys( s_dict ) -> Union[str, Any]:
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(f"{key} -> {new_key}" )
        s_dict[new_key] = s_dict.pop(key )
    return s_dict
def make_linear_from_emb( emb ) -> Union[str, Any]:
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
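# Whisper ties its output projection to the decoder token embedding; this helper
# rebuilds that projection as a bias-free Linear layer sharing the embedding weights.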
def _download( url , root = "." ) -> bytes:
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split('''/''' )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(f"{download_target} exists and is not a regular file" )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target , '''rb''' ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" )
    with urllib.request.urlopen(url ) as source, open(download_target , '''wb''' ) as output:
        with tqdm(
            total=int(source.info().get('''Content-Length''' ) ) , ncols=8_0 , unit='''iB''' , unit_scale=True , unit_divisor=1_0_2_4 ) as loop:
            while True:
                buffer = source.read(8_1_9_2 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target , '''rb''' ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            '''Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.''' )
    return model_bytes
def convert_openai_whisper_to_tfms( checkpoint_path , pytorch_dump_folder_path ):
    if ".pt" not in checkpoint_path:
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path] ) ) , map_location='''cpu''' )
    else:
        original_checkpoint = torch.load(checkpoint_path , map_location='''cpu''' )
    dimensions = original_checkpoint['''dims''']
    state_dict = original_checkpoint['''model_state_dict''']
    proj_out_weights = state_dict['''decoder.token_embedding.weight''']
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    tie_embeds = True
    ffn_dim = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=ffn_dim , decoder_ffn_dim=ffn_dim , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_head'''] , max_source_positions=dimensions['''n_audio_ctx'''] , )
    model = WhisperForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            f" but all the following weights are missing {missing}" )
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowercase__ : List[str] = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowercase__ : int = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 338 | 0 |
from __future__ import annotations
def p_series( nth_term: int | float | str , power: int | float | str ):
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series = []
    for temp in range(int(nth_term ) ):
        series.append(F"""1 / {pow(temp + 1 , int(power ) )}""" if series else "1" )
return series
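# Example: p_series(5, 2) -> ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25'].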
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("""Enter the last number (nth term) of the P-Series"""))
    power = int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
| 92 | from ...processing_utils import ProcessorMixin
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = ["""image_processor""", """feature_extractor"""]
UpperCAmelCase_ : Optional[int] = """TvltImageProcessor"""
UpperCAmelCase_ : Optional[int] = """TvltFeatureExtractor"""
    def __init__( self , image_processor , feature_extractor ) ->Optional[int]:
        super().__init__(image_processor=image_processor , feature_extractor=feature_extractor )
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__( self , images=None , audio=None , images_mixed=None , sampling_rate=None , mask_audio=False , mask_pixel=False , *args , **kwargs , ) ->List[Any]:
        if images is None and audio is None:
            raise ValueError('''You need to specify either an `images` or `audio` input to process.''' )
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images , mask_pixel=mask_pixel , *args , **kwargs )
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed , is_mixed=True , *args , **kwargs )
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio , *args , sampling_rate=sampling_rate , mask_audio=mask_audio , **kwargs )
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict )
        if images is not None:
            output_dict.update(images_dict )
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict )
        return output_dict
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
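        # dict.fromkeys keeps first-seen order while dropping input names that the
        # image processor and feature extractor have in common.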
| 338 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCAmelCase__ ( metaclass=lowerCamelCase_ ):
lowerCAmelCase_ = ['''torch''', '''scipy''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def _snake_case ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def _snake_case ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''scipy'''] )
def selection_sort( collection ) -> list:
    length = len(collection )
    for i in range(length - 1 ):
        least = i
        for k in range(i + 1 , length ):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least] , collection[i] = (collection[i], collection[least])
    return collection
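# Selection sort does O(n^2) comparisons but only O(n) swaps,
# e.g. selection_sort([3, 1, 2]) -> [1, 2, 3].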
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
| 338 | 0 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
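# norm_squared(v) = v . v = ||v||^2; the RBF kernel below is exp(-gamma * ||x - y||^2).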
def norm_squared( vector: ndarray ) -> float:
    """simple docstring"""
    return np.dot(vector , vector )
class _snake_case :
    def __init__( self , *,
        regularization: float = np.inf , kernel: str = "linear" , gamma: float = 0.0 , ):
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError('''rbf kernel requires gamma''' )
            if not isinstance(self.gamma , (float, int) ):
                raise ValueError('''gamma must be float or int''' )
            if not self.gamma > 0:
                raise ValueError('''gamma must be > 0''' )
            self.kernel = self.__rbf
        # in the future, there could be a default value like in sklearn
        # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
        # previously it was 1/(n_features)
        else:
            msg = F'''Unknown kernel: {kernel}'''
            raise ValueError(msg )
    def __linear( self , vectora: ndarray , vectorb: ndarray ):
        return np.dot(vectora , vectorb )
    def __rbf( self , vectora: ndarray , vectorb: ndarray ):
        return np.exp(-(self.gamma * norm_squared(vectora - vectorb )) )
    def fit( self , observations , classes: ndarray ):
        self.observations = observations
        self.classes = classes
        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n ,) = np.shape(classes )
        def to_minimize(candidate ) -> float:
            s = 0
            (n ,) = np.shape(candidate )
            for i in range(n ):
                for j in range(n ):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i] , observations[j] )
                    )
            return 1 / 2 * s - sum(candidate )
        ly_contraint = LinearConstraint(classes , 0 , 0 )
        l_bounds = Bounds(0 , self.regularization )
        l_star = minimize(
            to_minimize , np.ones(n ) , bounds=l_bounds , constraints=[ly_contraint] ).x
        self.optimum = l_star
        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n ):
            for j in range(n ):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i] , observations[j] )
        self.offset = s / n
    def predict( self , observation: ndarray ):
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] , observation )
            for n in range(len(self.classes ) ) )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
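# Minimal usage sketch (illustrative data; class name as defined above):
#     xs = [np.array([2.0, 2.0]), np.array([3.0, 3.0]), np.array([-2.0, -2.0]), np.array([-3.0, -3.0])]
#     ys = np.array([1, 1, -1, -1])
#     svm = _snake_case(kernel="linear")
#     svm.fit(xs, ys)
#     svm.predict(np.array([2.5, 2.5]))  # expected: 1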
| 94 | import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class lowercase_ :
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=19 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.0_2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , ) ->Union[str, Any]:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__SCREAMING_SNAKE_CASE , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , )
return config
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Tuple:
lowerCAmelCase = EsmForProteinFolding(config=__SCREAMING_SNAKE_CASE ).float()
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
lowerCAmelCase = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) ,
) = config_and_inputs
lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowercase_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : Dict = (EsmForProteinFolding,) if is_torch_available() else ()
UpperCAmelCase_ : List[Any] = ()
UpperCAmelCase_ : Tuple = {} if is_torch_available() else {}
UpperCAmelCase_ : List[str] = False
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
lowerCAmelCase = EsmFoldModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
@unittest.skip('''Does not support attention outputs''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
pass
@unittest.skip
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
pass
@unittest.skip('''ESMFold only has one output format.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
pass
@require_torch
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
lowerCAmelCase = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float()
model.eval()
lowerCAmelCase = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )['''positions''']
lowerCAmelCase = torch.tensor([2.5_8_2_8, 0.7_9_9_3, -1_0.9_3_3_4] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 338 | 0 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowerCAmelCase ( UpperCamelCase__ , unittest.TestCase):
_lowercase : int = VideoToVideoSDPipeline
_lowercase : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""}) - {"""image""", """width""", """height"""}
_lowercase : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""}) - {"""image"""}
_lowercase : Any = PipelineTesterMixin.required_optional_params - {"""latents"""}
_lowercase : Dict = False
# No `output_type`.
_lowercase : Tuple = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
])
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
        a__ : int =UNet3DConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=3_2 , attention_head_dim=4 , )
a__ : str =DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , )
torch.manual_seed(0 )
a__ : Optional[Any] =AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
a__ : List[Any] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="gelu" , projection_dim=5_1_2 , )
a__ : Union[str, Any] =CLIPTextModel(lowerCAmelCase__ )
a__ : Optional[Any] =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
a__ : Union[str, Any] ={
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__=0 ) -> Optional[int]:
'''simple docstring'''
a__ : Dict =floats_tensor((1, 3, 3, 3_2, 3_2) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
if str(lowerCAmelCase__ ).startswith("mps" ):
a__ : List[Any] =torch.manual_seed(lowerCAmelCase__ )
else:
a__ : List[str] =torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
a__ : Union[str, Any] ={
"prompt": "A painting of a squirrel eating a burger",
"video": video,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : List[str] ="cpu" # ensure determinism for the device-dependent torch.Generator
a__ : Tuple =self.get_dummy_components()
a__ : int =VideoToVideoSDPipeline(**lowerCAmelCase__ )
a__ : Any =sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
a__ : List[Any] =self.get_dummy_inputs(lowerCAmelCase__ )
a__ : List[Any] ="np"
a__ : Any =sd_pipe(**lowerCAmelCase__ ).frames
a__ : List[str] =frames[0][-3:, -3:, -1]
assert frames[0].shape == (3_2, 3_2, 3)
a__ : Optional[Any] =np.array([1_0_6, 1_1_7, 1_1_3, 1_7_4, 1_3_7, 1_1_2, 1_4_8, 1_5_1, 1_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCAmelCase__ , expected_max_diff=5E-3 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def _lowercase ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def _lowercase ( self ) -> Any:
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class __lowerCAmelCase ( unittest.TestCase):
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : Dict =VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
a__ : Optional[Any] =torch.Generator(device="cpu" ).manual_seed(0 )
a__ : Optional[Any] =torch.randn((1, 1_0, 3, 1_0_2_4, 5_7_6) , generator=lowerCAmelCase__ )
a__ : List[str] =video.to("cuda" )
a__ : Tuple ="Spiderman is surfing"
a__ : int =pipe(lowerCAmelCase__ , video=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=3 , output_type="pt" ).frames
a__ : int =np.array([-1.0_45_89_84, -1.1_27_92_97, -0.9_66_30_86, -0.91_50_39_06, -0.75_09_76_56] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 95 | import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = ["""image_processor""", """tokenizer"""]
UpperCAmelCase_ : int = """OwlViTImageProcessor"""
UpperCAmelCase_ : Any = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) ->Any:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , query_images=None , images=None , padding="max_length" , return_tensors="np" , **kwargs ) ->int:
        if text is None and query_images is None and images is None:
            raise ValueError(
                '''You have to specify at least one text or query image or image. All three cannot be none.''' )
        if text is not None:
            if isinstance(text , str ) or (isinstance(text , List ) and not isinstance(text[0] , List )):
                encodings = [self.tokenizer(text , padding=padding , return_tensors=return_tensors , **kwargs )]
            elif isinstance(text , List ) and isinstance(text[0] , List ):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t ) != max_num_queries:
                        t = t + [''' '''] * (max_num_queries - len(t ))
                    encoding = self.tokenizer(t , padding=padding , return_tensors=return_tensors , **kwargs )
                    encodings.append(encoding )
            else:
                raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' )
            if return_tensors == "np":
                input_ids = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
                attention_mask = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
                attention_mask = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 )
                attention_mask = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
                attention_mask = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
            else:
                raise ValueError('''Target return tensor type could not be returned''' )
            encoding = BatchEncoding()
            encoding['''input_ids'''] = input_ids
            encoding['''attention_mask'''] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images , return_tensors=return_tensors , **kwargs ).pixel_values
            encoding['''query_pixel_values'''] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Optional[int]:
return self.image_processor.post_process(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Any:
return self.image_processor.post_process_object_detection(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Tuple:
return self.image_processor.post_process_image_guided_detection(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->str:
return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __SCREAMING_SNAKE_CASE , )
return self.image_processor
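# Two minimal sketches for the processor above. The first isolates the query-padding
# rule from `__call__`: every sample in a nested text batch is padded with single-space
# strings up to the longest query list. The second is a hedged usage example; the
# checkpoint name is an assumption, not taken from this file.
def pad_text_queries(text):
    # text: list of lists of query strings, e.g. [["a cat"], ["a dog", "a remote"]]
    max_num_queries = max(len(t) for t in text)
    return [t + [" "] * (max_num_queries - len(t)) for t in text]
assert pad_text_queries([["a cat"], ["a dog", "a remote"]]) == [["a cat", " "], ["a dog", "a remote"]]
# from transformers import OwlViTProcessor
# processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")  # assumed checkpoint
# inputs = processor(text=[["a photo of a cat"]], images=image, return_tensors="pt")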
| 338 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fastaa_timesteps,
    smartaa_timesteps,
    smartaaa_timesteps,
    superaa_timesteps,
    superaaa_timesteps,
)
@dataclass
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker | 96 | import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 5_1_2,
'''albert-large-v1''': 5_1_2,
'''albert-xlarge-v1''': 5_1_2,
'''albert-xxlarge-v1''': 5_1_2,
'''albert-base-v2''': 5_1_2,
'''albert-large-v2''': 5_1_2,
'''albert-xlarge-v2''': 5_1_2,
'''albert-xxlarge-v2''': 5_1_2,
}
SPIECE_UNDERLINE = '''▁'''
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = VOCAB_FILES_NAMES
UpperCAmelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs = None , **kwargs , ) ->None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
return len(self.sp_model )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
lowerCAmelCase = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) ->int:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ) ->Tuple:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ) ->Any:
        if self.remove_space:
            outputs = ''' '''.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
        if not self.keep_accents:
            outputs = unicodedata.normalize('''NFKD''' , outputs )
            outputs = ''''''.join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text ) ->List[str]:
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , '''''' ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->int:
return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->int:
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
    def convert_tokens_to_string( self , tokens ) ->str:
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) ->List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) ->List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) ->List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) ->Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
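# A small illustration (not from the original file) of the special-token layout the
# three methods above implement for a sentence pair: [CLS] A [SEP] B [SEP], with
# token type ids 0 over the first segment and 1 over the second. The ids below are
# placeholders; a real tokenizer would produce SentencePiece vocabulary ids.
cls_id, sep_id = 2, 3
a_ids, b_ids = [10, 11], [20]
input_ids = [cls_id] + a_ids + [sep_id] + b_ids + [sep_id]
token_type_ids = [0] * (len(a_ids) + 2) + [1] * (len(b_ids) + 1)
assert len(input_ids) == len(token_type_ids) == 6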
| 338 | 0 |
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp( tf.keras.optimizers.schedules.LearningRateSchedule ):
"""simple docstring"""
    def __init__( self , initial_learning_rate , decay_schedule_fn , warmup_steps , power = 1.0 , name = None , ):
        '''simple docstring'''
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__( self , step ):
        '''simple docstring'''
        with tf.name_scope(self.name or '''WarmUp''' ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step , tf.float32 )
            warmup_steps_float = tf.cast(self.warmup_steps , tf.float32 )
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done , self.power )
            return tf.cond(
                global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=name , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
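# Quick numeric sketch (assumed values) of the linear-warmup case of WarmUp above
# (power=1.0): at step 500 of 1000 warmup steps with init_lr=1e-3 the schedule
# returns (500 / 1000) * 1e-3 = 5e-4 before handing off to decay_schedule_fn.
init_lr, warmup_steps, step = 1e-3, 1000, 500
warmup_lr = init_lr * (step / warmup_steps) ** 1.0
assert abs(warmup_lr - 5e-4) < 1e-12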
def create_optimizer( init_lr , num_train_steps , num_warmup_steps , min_lr_ratio = 0.0 , adam_beta1 = 0.9 , adam_beta2 = 0.999 , adam_epsilon = 1e-8 , adam_clipnorm = None , adam_global_clipnorm = None , weight_decay_rate = 0.0 , power = 1.0 , include_in_weight_decay = None , ):
    '''simple docstring'''
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=power , )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr , decay_schedule_fn=lr_schedule , warmup_steps=num_warmup_steps , )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule , weight_decay_rate=weight_decay_rate , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''] , include_in_weight_decay=include_in_weight_decay , )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
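# Hedged usage sketch for create_optimizer above; the step counts and rates are
# made-up values. The factory returns the schedule alongside the optimizer so the
# learning rate can be inspected or logged independently of the optimizer state.
# optimizer, lr_schedule = create_optimizer(
#     init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=1_000, weight_decay_rate=0.01
# )
# lr_at_start = lr_schedule(0)  # still in warmup, close to 0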
class AdamWeightDecay( Adam ):
"""simple docstring"""
    def __init__( self , learning_rate = 0.001 , beta_1 = 0.9 , beta_2 = 0.999 , epsilon = 1e-7 , amsgrad = False , weight_decay_rate = 0.0 , include_in_weight_decay = None , exclude_from_weight_decay = None , name = "AdamWeightDecay" , **kwargs , ):
        '''simple docstring'''
        super().__init__(learning_rate , beta_1 , beta_2 , epsilon , amsgrad , name , **kwargs )
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
@classmethod
    def from_config( cls , config ):
        '''simple docstring'''
        custom_objects = {'''WarmUp''': WarmUp}
        return super(AdamWeightDecay , cls ).from_config(config , custom_objects=custom_objects )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
super(UpperCamelCase_ , self )._prepare_local(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :List[str] = tf.constant(
self.weight_decay_rate , name='''adam_weight_decay_rate''' )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , )
return tf.no_op()
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None , **UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :List[str] = list(zip(*UpperCamelCase_ ) )
return super(UpperCamelCase_ , self ).apply_gradients(zip(UpperCamelCase_ , UpperCamelCase_ ) , name=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
UpperCamelCase__ :Dict = apply_state or {}
UpperCamelCase__ :str = apply_state.get((var_device, var_dtype) )
if coefficients is None:
UpperCamelCase__ :List[str] = self._fallback_apply_state(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :List[Any] = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self._get_lr(var.device , var.dtype.base_dtype , UpperCamelCase_ )
UpperCamelCase__ :Dict = self._decay_weights_op(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
with tf.control_dependencies([decay] ):
return super(UpperCamelCase_ , self )._resource_apply_dense(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self._get_lr(var.device , var.dtype.base_dtype , UpperCamelCase_ )
UpperCamelCase__ :Dict = self._decay_weights_op(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
with tf.control_dependencies([decay] ):
return super(UpperCamelCase_ , self )._resource_apply_sparse(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = super().get_config()
config.update({'''weight_decay_rate''': self.weight_decay_rate} )
return config
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(UpperCamelCase_ , UpperCamelCase_ ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(UpperCamelCase_ , UpperCamelCase_ ) is not None:
return False
return True
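# A standalone sketch of the include/exclude decision implemented above: an include
# pattern forces decay on, otherwise any exclude pattern (e.g. LayerNorm/bias) turns
# it off, and everything else defaults to decayed. The patterns below are assumptions.
import re
def use_weight_decay(name, include=(), exclude=("LayerNorm", "bias")):
    if any(re.search(r, name) for r in include):
        return True
    if any(re.search(r, name) for r in exclude):
        return False
    return True
assert use_weight_decay("encoder/layer_0/kernel") is True
assert use_weight_decay("encoder/layer_0/bias") is False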
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = []
UpperCamelCase__ :Tuple = None
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self._accum_steps is None:
UpperCamelCase__ :List[str] = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=UpperCamelCase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if not self._gradients:
raise ValueError('''The accumulator should be called first to initialize the gradients''' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self , UpperCamelCase_ ):
'''simple docstring'''
if not self._gradients:
UpperCamelCase__ :Tuple = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(UpperCamelCase_ ) , trainable=UpperCamelCase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(UpperCamelCase_ ) != len(self._gradients ):
raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(UpperCamelCase_ )}''' )
for accum_gradient, gradient in zip(self._gradients , UpperCamelCase_ ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(UpperCamelCase_ )
self._accum_steps.assign_add(1 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(UpperCamelCase_ ) ) | 97 | import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = (DEISMultistepScheduler,)
UpperCAmelCase_ : int = (("""num_inference_steps""", 25),)
    def get_scheduler_config( self , **kwargs ) ->str:
        config = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''solver_order''': 2,
        }
        config.update(**kwargs )
        return config
    def check_over_configs( self , time_step=0 , **config ) ->Tuple:
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output , new_output = sample, sample
            for t in range(time_step , time_step + scheduler.config.solver_order + 1 ):
                output = scheduler.step(residual , t , output , **kwargs ).prev_sample
                new_output = new_scheduler.step(residual , t , new_output , **kwargs ).prev_sample
                assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
pass
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
lowerCAmelCase = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop( self , scheduler=None , **config ) ->List[Any]:
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
lowerCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
lowerCAmelCase = scheduler.timesteps[5]
lowerCAmelCase = scheduler.timesteps[6]
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.23916 ) < 1e-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.23916 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , algorithm_type='''deis''' , solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = self.full_loop(
solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , )
assert not torch.isnan(__SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers"
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=__SCREAMING_SNAKE_CASE , time_step=0 )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.23916 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
        sample = self.full_loop(prediction_type='''v_prediction''' )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.091 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        assert sample.dtype == torch.float16
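# A hedged end-to-end sketch of driving the scheduler outside the test harness;
# the "model output" here is a zero tensor stand-in, not a trained denoiser.
import torch
from diffusers import DEISMultistepScheduler
scheduler = DEISMultistepScheduler(num_train_timesteps=1000, solver_order=2)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    residual = torch.zeros_like(sample)  # placeholder for model(sample, t)
    sample = scheduler.step(residual, t, sample).prev_sample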
| 338 | 0 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data( olid = "isbn/0140328726" ):
    new_olid = olid.strip().strip('/' )  # Remove leading/trailing whitespace & slashes
    if new_olid.count('/' ) != 1:
        msg = f'''{olid} is not a valid Open Library olid'''
        raise ValueError(msg )
    return requests.get(f'''https://openlibrary.org/{new_olid}.json''' ).json()
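# Usage sketch: the default argument already points at a known record, so calling the
# helper with no arguments fetches the book for ISBN 0140328726. Author olids work the
# same way; the example olid below is an assumption for illustration only.
# book = get_openlibrary_data()                       # same as get_openlibrary_data("isbn/0140328726")
# author = get_openlibrary_data("authors/OL34184A")   # hypothetical author record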
def summarize_book( ol_book_data ):
    desired_keys = {
        'title': 'Title',
        'publish_date': 'Publish date',
        'authors': 'Authors',
        'number_of_pages': 'Number of pages:',
        'first_sentence': 'First sentence',
        'isbn_10': 'ISBN (10)',
        'isbn_13': 'ISBN (13)',
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data['Authors'] = [
        get_openlibrary_data(author['key'] )['name'] for author in data['Authors']
    ]
    data['First sentence'] = data['First sentence']['value']
    for key, value in data.items():
        if isinstance(value , list ):
            data[key] = ', '.join(value )
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
lowerCAmelCase__ : int = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
lowerCAmelCase__ : Dict = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
print('\n'.join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""")
| 98 | import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
torch.manual_seed(0 )
lowerCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=2 , generator=generator , output_type='''numpy''' ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = pipe(num_inference_steps=2 , generator=generator , output_type='''numpy''' , return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
        model_id = '''google/ncsnpp-celebahq-256'''
        model = UNetaDModel.from_pretrained(model_id )
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model , scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=20 , generator=generator , output_type='''numpy''' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 338 | 0 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"""\b(a|an|the)\b""", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' )
parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' )
parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' )
parser.add_argument(
'--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' )
parser.add_argument(
'--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' )
parser.add_argument(
'--na-prob-thresh' , '-t' , type=A__ , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , )
parser.add_argument(
'--out-image-dir' , '-p' , metavar='out_images' , default=A__ , help='Save precision-recall curves to directory.' )
parser.add_argument('--verbose' , '-v' , action='store_true' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def make_qid_to_has_ans( dataset ):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa['id']] = bool(qa['answers']['text'] )
    return qid_to_has_ans
def normalize_answer( s ):
    def remove_articles(text ):
        return ARTICLES_REGEX.sub(' ' , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def get_tokens( s ):
    if not s:
        return []
    return normalize_answer(s ).split()
def compute_exact( a_gold , a_pred ):
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_fa( a_gold , a_pred ):
    gold_toks = get_tokens(a_gold )
    pred_toks = get_tokens(a_pred )
    common = collections.Counter(gold_toks ) & collections.Counter(pred_toks )
    num_same = sum(common.values() )
    if len(gold_toks ) == 0 or len(pred_toks ) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks )
    recall = 1.0 * num_same / len(gold_toks )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
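# Worked example (made-up strings) for the token-level F1 above: gold "blue cat sat"
# vs. prediction "blue cat" share 2 tokens, so precision = 2/2, recall = 2/3 and
# F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8. Exact match ignores case and punctuation.
assert abs(compute_fa("blue cat sat", "blue cat") - 0.8) < 1e-9
assert compute_exact("Blue Cat!", "blue cat") == 1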
def A_ ( A__ , A__ ) -> Any:
a__ : Tuple = {}
a__ : int = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
a__ : Optional[int] = qa['id']
a__ : Any = [t for t in qa['answers']['text'] if normalize_answer(A__ )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
a__ : List[str] = ['']
if qid not in preds:
print(F'Missing prediction for {qid}' )
continue
a__ : Union[str, Any] = preds[qid]
# Take max over all gold answers
a__ : Tuple = max(compute_exact(A__ , A__ ) for a in gold_answers )
a__ : List[Any] = max(compute_fa(A__ , A__ ) for a in gold_answers )
return exact_scores, fa_scores
def A_ ( A__ , A__ , A__ , A__ ) -> Tuple:
a__ : List[Any] = {}
for qid, s in scores.items():
a__ : Tuple = na_probs[qid] > na_prob_thresh
if pred_na:
a__ : str = float(not qid_to_has_ans[qid] )
else:
a__ : int = s
return new_scores
def A_ ( A__ , A__ , A__=None ) -> List[Any]:
if not qid_list:
a__ : str = len(A__ )
return collections.OrderedDict(
[
('exact', 1_00.0 * sum(exact_scores.values() ) / total),
('f1', 1_00.0 * sum(fa_scores.values() ) / total),
('total', total),
] )
else:
a__ : int = len(A__ )
return collections.OrderedDict(
[
('exact', 1_00.0 * sum(exact_scores[k] for k in qid_list ) / total),
('f1', 1_00.0 * sum(fa_scores[k] for k in qid_list ) / total),
('total', total),
] )
def A_ ( A__ , A__ , A__ ) -> Optional[int]:
for k in new_eval:
a__ : Optional[int] = new_eval[k]
def A_ ( A__ , A__ , A__ , A__ ) -> Union[str, Any]:
plt.step(A__ , A__ , color='b' , alpha=0.2 , where='post' )
plt.fill_between(A__ , A__ , step='post' , alpha=0.2 , color='b' )
plt.xlabel('Recall' )
plt.ylabel('Precision' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(A__ )
plt.savefig(A__ )
plt.clf()
def A_ ( A__ , A__ , A__ , A__ , A__=None , A__=None ) -> Any:
a__ : str = sorted(A__ , key=lambda A__ : na_probs[k] )
a__ : Tuple = 0.0
a__ : List[str] = 1.0
a__ : Optional[int] = 0.0
a__ : Any = [1.0]
a__ : Optional[int] = [0.0]
a__ : Tuple = 0.0
for i, qid in enumerate(A__ ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
a__ : Union[str, Any] = true_pos / float(i + 1 )
a__ : List[Any] = true_pos / float(A__ )
if i == len(A__ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(A__ )
recalls.append(A__ )
if out_image:
plot_pr_curve(A__ , A__ , A__ , A__ )
return {"ap": 1_00.0 * avg_prec}
def A_ ( A__ , A__ , A__ , A__ , A__ , A__ ) -> str:
if out_image_dir and not os.path.exists(A__ ):
os.makedirs(A__ )
a__ : List[str] = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
a__ : Optional[int] = make_precision_recall_eval(
A__ , A__ , A__ , A__ , out_image=os.path.join(A__ , 'pr_exact.png' ) , title='Precision-Recall curve for Exact Match score' , )
a__ : Optional[int] = make_precision_recall_eval(
A__ , A__ , A__ , A__ , out_image=os.path.join(A__ , 'pr_f1.png' ) , title='Precision-Recall curve for F1 score' , )
a__ : int = {k: float(A__ ) for k, v in qid_to_has_ans.items()}
a__ : Optional[Any] = make_precision_recall_eval(
A__ , A__ , A__ , A__ , out_image=os.path.join(A__ , 'pr_oracle.png' ) , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , )
merge_eval(A__ , A__ , 'pr_exact' )
merge_eval(A__ , A__ , 'pr_f1' )
merge_eval(A__ , A__ , 'pr_oracle' )
def A_ ( A__ , A__ , A__ , A__ ) -> List[Any]:
if not qid_list:
return
a__ : List[str] = [na_probs[k] for k in qid_list]
a__ : Dict = np.ones_like(A__ ) / float(len(A__ ) )
plt.hist(A__ , weights=A__ , bins=20 , range=(0.0, 1.0) )
plt.xlabel('Model probability of no-answer' )
plt.ylabel('Proportion of dataset' )
plt.title(F'Histogram of no-answer probability: {name}' )
plt.savefig(os.path.join(A__ , F'na_prob_hist_{name}.png' ) )
plt.clf()
def A_ ( A__ , A__ , A__ , A__ ) -> Optional[int]:
a__ : Any = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
a__ : List[str] = num_no_ans
a__ : Tuple = cur_score
a__ : Tuple = 0.0
a__ : str = sorted(A__ , key=lambda A__ : na_probs[k] )
for i, qid in enumerate(A__ ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
a__ : str = scores[qid]
else:
if preds[qid]:
a__ : int = -1
else:
a__ : Tuple = 0
cur_score += diff
if cur_score > best_score:
a__ : Dict = cur_score
a__ : Tuple = na_probs[qid]
return 1_00.0 * best_score / len(A__ ), best_thresh
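# Tiny fabricated illustration of the sweep above: questions are visited in order of
# increasing no-answer probability, and predicting "" for everything past the best
# cut-off maximizes the summed score; the threshold is the na_prob at that point.
# preds          = {"q1": "an answer", "q2": "wrong"}
# scores         = {"q1": 1.0, "q2": 0.0}   # per-question EM or F1
# na_probs       = {"q1": 0.1, "q2": 0.9}
# qid_to_has_ans = {"q1": True, "q2": True}
# best_score, best_thresh = find_best_thresh(preds, scores, na_probs, qid_to_has_ans)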
def A_ ( A__ , A__ , A__ , A__ , A__ , A__ ) -> Optional[Any]:
a__ , a__ : str = find_best_thresh(A__ , A__ , A__ , A__ )
a__ , a__ : Tuple = find_best_thresh(A__ , A__ , A__ , A__ )
a__ : Optional[Any] = best_exact
a__ : Optional[Any] = exact_thresh
a__ : Tuple = best_fa
a__ : int = fa_thresh
def main():
    with open(OPTS.data_file ) as f:
        dataset_json = json.load(f )
    dataset = dataset_json['data']
    with open(OPTS.pred_file ) as f:
        preds = json.load(f )
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file ) as f:
            na_probs = json.load(f )
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset )  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw , fa_raw = get_raw_scores(dataset , preds )
    exact_thresh = apply_no_ans_threshold(exact_raw , na_probs , qid_to_has_ans , OPTS.na_prob_thresh )
    fa_thresh = apply_no_ans_threshold(fa_raw , na_probs , qid_to_has_ans , OPTS.na_prob_thresh )
    out_eval = make_eval_dict(exact_thresh , fa_thresh )
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh , fa_thresh , qid_list=has_ans_qids )
        merge_eval(out_eval , has_ans_eval , 'HasAns' )
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh , fa_thresh , qid_list=no_ans_qids )
        merge_eval(out_eval , no_ans_eval , 'NoAns' )
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval , preds , exact_raw , fa_raw , na_probs , qid_to_has_ans )
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval , exact_raw , fa_raw , na_probs , qid_to_has_ans , OPTS.out_image_dir )
        histogram_na_prob(na_probs , has_ans_qids , OPTS.out_image_dir , 'hasAns' )
        histogram_na_prob(na_probs , no_ans_qids , OPTS.out_image_dir , 'noAns' )
    if OPTS.out_file:
        with open(OPTS.out_file , 'w' ) as f:
            json.dump(out_eval , f )
    else:
        print(json.dumps(out_eval , indent=2 ) )
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
| 99 | from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
UpperCamelCase_ , r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
    def get_masked_index( self , input_ids ) ->np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False )
        else:
            raise ValueError('''Unsupported framework''' )
        return masked_index
    def _ensure_exactly_one_mask_token( self , input_ids ):
        masked_index = self.get_masked_index(input_ids )
        numel = np.prod(masked_index.shape )
        if numel < 1:
            raise PipelineException(
                '''fill-mask''' , self.model.base_model_prefix , F"No mask_token ({self.tokenizer.mask_token}) found on the input" , )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->str:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE )
    def preprocess( self , inputs , return_tensors=None , **preprocess_parameters ) ->Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors )
        self.ensure_exactly_one_mask_token(model_inputs )
        return model_inputs
    def _forward( self , model_inputs ) ->Tuple:
        model_outputs = self.model(**model_inputs )
        model_outputs['''input_ids'''] = model_inputs['''input_ids''']
        return model_outputs
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=None ) ->str:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
lowerCAmelCase = target_ids.shape[0]
lowerCAmelCase = model_outputs['''input_ids'''][0]
lowerCAmelCase = model_outputs['''logits''']
if self.framework == "tf":
lowerCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
lowerCAmelCase = outputs.numpy()
lowerCAmelCase = outputs[0, masked_index, :]
lowerCAmelCase = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
if target_ids is not None:
lowerCAmelCase = tf.gather_nd(tf.squeeze(__SCREAMING_SNAKE_CASE , 0 ) , target_ids.reshape(-1 , 1 ) )
lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE , 0 )
lowerCAmelCase = tf.math.top_k(__SCREAMING_SNAKE_CASE , k=__SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase = topk.values.numpy(), topk.indices.numpy()
else:
lowerCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
lowerCAmelCase = outputs[0, masked_index, :]
lowerCAmelCase = logits.softmax(dim=-1 )
if target_ids is not None:
lowerCAmelCase = probs[..., target_ids]
lowerCAmelCase , lowerCAmelCase = probs.topk(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = []
lowerCAmelCase = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
lowerCAmelCase = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
lowerCAmelCase = input_ids.numpy().copy()
if target_ids is not None:
lowerCAmelCase = target_ids[p].tolist()
lowerCAmelCase = p
# Filter padding out:
lowerCAmelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
lowerCAmelCase = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(__SCREAMING_SNAKE_CASE )
result.append(__SCREAMING_SNAKE_CASE )
if single_mask:
return result[0]
return result
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Optional[Any]:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowerCAmelCase = [targets]
try:
lowerCAmelCase = self.tokenizer.get_vocab()
except Exception:
lowerCAmelCase = {}
lowerCAmelCase = []
for target in targets:
lowerCAmelCase = vocab.get(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if id_ is None:
lowerCAmelCase = self.tokenizer(
__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , max_length=1 , truncation=__SCREAMING_SNAKE_CASE , )['''input_ids''']
if len(__SCREAMING_SNAKE_CASE ) == 0:
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
lowerCAmelCase = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
F"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." )
target_ids.append(id_ )
lowerCAmelCase = list(set(__SCREAMING_SNAKE_CASE ) )
if len(__SCREAMING_SNAKE_CASE ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
return target_ids
    def _sanitize_parameters( self , top_k=None , targets=None ) ->Dict:
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets , top_k )
            postprocess_params['''target_ids'''] = target_ids
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                '''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
        return {}, {}, postprocess_params
    def __call__( self , inputs , *args , **kwargs ) ->List[Any]:
        outputs = super().__call__(inputs , **kwargs )
        if isinstance(inputs , list ) and len(inputs ) == 1:
            return outputs[0]
        return outputs
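# Hedged usage sketch for the pipeline class above; the checkpoint name is an
# assumption, not something this file pins down.
# from transformers import pipeline
# fill_mask = pipeline("fill-mask", model="distilroberta-base")
# fill_mask("The capital of France is <mask>.", top_k=3)
# fill_mask("The capital of France is <mask>.", targets=[" Paris", " Lyon"])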
| 338 | 0 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester( ConfigTester ):
"""simple docstring"""
    def create_and_test_config_common_properties( self ):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config , """embed_dim"""))
        self.parent.assertTrue(hasattr(config , """num_heads"""))
class TFCvtModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , embed_dim=[16, 48, 96] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , stride_kv=[2, 2, 2] , cls_token=[False, False, True] , attention_drop_rate=[0.0, 0.0, 0.0] , initializer_range=0.02 , layer_norm_eps=1e-12 , is_training=True , use_labels=True , num_labels=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size] , self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = TFCvtModel(config=config)
        result = model(pixel_values , training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width))
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values , labels=labels , training=False)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""")
def snake_case_ ( self):
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""")
def snake_case_ ( self):
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""")
def snake_case_ ( self):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""")) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def snake_case_ ( self):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""")) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def snake_case_ ( self):
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""")
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = tf.keras.mixed_precision.Policy("""mixed_float16""")
tf.keras.mixed_precision.set_global_policy(lowerCAmelCase__)
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""")
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 100 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
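# Maps submodule name -> list of public names; _LazyModule below consumes it so
# the tokenizer module is only imported on first attribute access.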
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 338 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
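# Standard lazy-import layout for transformers subpackages: the base entries are
# always importable, while tokenizer/PyTorch/TensorFlow entries are appended
# below only if the corresponding optional dependency is available.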
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :List[str] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 101 |
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")
    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)
    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
| 338 | 0 |
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    """simple docstring"""

    model_input_names = ["input_values", "attention_mask"]
def __init__(self , a_ = 1 , a_ = 1_60_00 , a_ = 0.0 , a_ = False , a_ = 80 , a_ = 16 , a_ = 64 , a_ = "hann_window" , a_ = 1.0 , a_ = 80 , a_ = 76_00 , a_ = 1E-10 , a_ = 2 , a_ = True , **a_ , ):
'''simple docstring'''
super().__init__(feature_size=a_ , sampling_rate=a_ , padding_value=a_ , **a_ )
__snake_case : Optional[Any] = do_normalize
__snake_case : Optional[int] = return_attention_mask
__snake_case : int = num_mel_bins
__snake_case : List[str] = hop_length
__snake_case : List[Any] = win_length
__snake_case : Union[str, Any] = win_function
__snake_case : List[Any] = frame_signal_scale
__snake_case : List[Any] = fmin
__snake_case : int = fmax
__snake_case : Optional[int] = mel_floor
__snake_case : List[str] = reduction_factor
__snake_case : Union[str, Any] = win_length * sampling_rate // 10_00
__snake_case : Dict = hop_length * sampling_rate // 10_00
__snake_case : str = optimal_fft_length(self.sample_size )
__snake_case : Union[str, Any] = (self.n_fft // 2) + 1
__snake_case : Optional[int] = window_function(window_length=self.sample_size , name=self.win_function , periodic=a_ )
__snake_case : Dict = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )
if frame_signal_scale != 1.0:
warnings.warn(
'''The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers''' , a_ , )
if reduction_factor != 2.0:
warnings.warn(
'''The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers''' , a_ , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        """simple docstring"""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def _extract_mel_features(self, one_waveform):
        """simple docstring"""
        log_mel_spec = spectrogram(
            one_waveform, window=self.window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, mel_filters=self.mel_filters, mel_floor=self.mel_floor, log_mel="log10", )
        return log_mel_spec.T
def __call__(self , a_ = None , a_ = None , a_ = False , a_ = None , a_ = False , a_ = None , a_ = None , a_ = None , a_ = None , **a_ , ):
'''simple docstring'''
if audio is None and audio_target is None:
raise ValueError('''You must provide either `audio` or `audio_target` values.''' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if audio is not None:
__snake_case : int = self._process_audio(
a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , **a_ , )
else:
__snake_case : Optional[int] = None
if audio_target is not None:
__snake_case : int = self._process_audio(
a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , **a_ , )
if inputs is None:
return inputs_target
else:
__snake_case : Union[str, Any] = inputs_target['''input_values''']
__snake_case : Union[str, Any] = inputs_target.get('''attention_mask''' )
if decoder_attention_mask is not None:
__snake_case : Optional[Any] = decoder_attention_mask
return inputs
def SCREAMING_SNAKE_CASE (self , a_ , a_ = False , a_ = False , a_ = None , a_ = False , a_ = None , a_ = None , a_ = None , **a_ , ):
'''simple docstring'''
__snake_case : List[Any] = isinstance(a_ , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
__snake_case : int = is_batched_numpy or (
isinstance(a_ , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__snake_case : Any = [np.asarray(a_ , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(a_ , np.ndarray ):
__snake_case : Optional[int] = np.asarray(a_ , dtype=np.floataa )
elif isinstance(a_ , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
__snake_case : Union[str, Any] = speech.astype(np.floataa )
# always return batch
if not is_batched:
__snake_case : List[Any] = [speech]
# needed to make pad() work on spectrogram inputs
__snake_case : List[str] = self.feature_size
# convert into correct format for padding
if is_target:
__snake_case : List[str] = [self._extract_mel_features(a_ ) for waveform in speech]
__snake_case : List[str] = BatchFeature({'''input_values''': features} )
__snake_case : Dict = self.num_mel_bins
else:
__snake_case : Dict = BatchFeature({'''input_values''': speech} )
__snake_case : Dict = self.pad(
a_ , padding=a_ , max_length=a_ , truncation=a_ , pad_to_multiple_of=a_ , return_attention_mask=a_ , **a_ , )
__snake_case : List[Any] = feature_size_hack
# convert input values to correct format
__snake_case : Dict = padded_inputs['''input_values''']
if not isinstance(input_values[0] , np.ndarray ):
__snake_case : Union[str, Any] = [np.asarray(a_ , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(a_ , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
__snake_case : Union[str, Any] = [array.astype(np.floataa ) for array in input_values]
elif isinstance(a_ , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
__snake_case : Optional[Any] = input_values.astype(np.floataa )
# convert attention_mask to correct format
__snake_case : Dict = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
__snake_case : List[str] = [np.asarray(a_ , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
__snake_case : Optional[int] = (
attention_mask
if self._get_padding_strategies(a_ , max_length=a_ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
__snake_case : Dict = self.zero_mean_unit_var_norm(
padded_inputs['''input_values'''] , attention_mask=a_ , padding_value=self.padding_value )
if return_tensors is not None:
__snake_case : Tuple = padded_inputs.convert_to_tensors(a_ )
return padded_inputs
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = super().to_dict()
# Don't serialize these as they are derived from the other properties.
__snake_case : Any = ['''window''', '''mel_filters''', '''sample_size''', '''sample_stride''', '''n_fft''', '''n_freqs''']
for name in names:
if name in output:
del output[name]
return output
| 102 |
from collections import defaultdict
from math import ceil, sqrt
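# Project Euler 174: a square lamina is a square outline with a centred square
# hole; index laminae by tile count t = outer_width**2 - hole_width**2 and count
# how many t <= t_limit arise in between 1 and n_limit distinct ways.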
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(f'{solution() = }')
| 338 | 0 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 103 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_textdatasetdict_reader_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_textdatasetdict_reader_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_textdatasetdict_reader_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
| 338 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "canine"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=16384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0xE000, eos_token_id=0xE001, downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16384, local_transformer_stride=128, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
| 104 |
def decimal_to_binary(num: int) -> str:
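    # Iterative decimal -> binary conversion; expected values for a couple of inputs:
    #   decimal_to_binary(5)  -> "0b101"
    #   decimal_to_binary(-5) -> "-0b101"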
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 338 | 0 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=99 , lowerCAmelCase__=64 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=512 , lowerCAmelCase__=16 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ) -> Optional[Any]:
a : Dict = parent
a : List[str] = batch_size
a : Any = seq_length
a : Dict = is_training
a : List[str] = use_input_mask
a : Any = use_token_type_ids
a : List[Any] = use_labels
a : Tuple = vocab_size
a : List[Any] = hidden_size
a : Optional[int] = num_hidden_layers
a : Union[str, Any] = num_attention_heads
a : str = intermediate_size
a : Dict = hidden_act
a : int = hidden_dropout_prob
a : Dict = attention_probs_dropout_prob
a : int = max_position_embeddings
a : Optional[Any] = type_vocab_size
a : Any = type_sequence_label_size
a : List[str] = initializer_range
a : List[Any] = num_labels
a : Optional[int] = num_choices
a : Optional[Any] = scope
a : List[str] = vocab_size - 1
def __a ( self ) -> List[Any]:
a : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a : Tuple = None
if self.use_input_mask:
a : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
a : List[str] = None
if self.use_labels:
a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a : Any = self.get_config()
return config, input_ids, input_mask, token_labels
def __a ( self ) -> Any:
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def __a ( self ) -> List[Any]:
a, a, a, a : List[Any] = self.prepare_config_and_inputs()
a : int = True
return config, input_ids, input_mask, token_labels
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
a : Dict = GPTNeoXModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a : List[Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
a : Dict = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
a : str = True
a : Dict = GPTNeoXModel(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a : List[str] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
a : Optional[int] = GPTNeoXForCausalLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a : Tuple = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
a : Union[str, Any] = self.num_labels
a : Optional[int] = GPTNeoXForQuestionAnswering(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a : str = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
a : Union[str, Any] = self.num_labels
a : Union[str, Any] = GPTNeoXForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a : Dict = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
a : List[str] = self.num_labels
a : Tuple = GPTNeoXForTokenClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a : List[str] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
a : Any = True
a : Union[str, Any] = GPTNeoXForCausalLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
# first forward pass
a : List[str] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__ )
a : Tuple = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
a : Union[str, Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
a : Any = torch.cat([input_ids, next_tokens] , dim=-1 )
a : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
a : List[str] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
a : Union[str, Any] = output_from_no_past["hidden_states"][0]
a : int = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , )["hidden_states"][0]
# select random slice
a : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
a : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
a : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
def __a ( self ) -> Any:
a : Any = self.prepare_config_and_inputs()
a, a, a, a : int = config_and_inputs
a : Dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"""feature-extraction""": GPTNeoXModel,
"""question-answering""": GPTNeoXForQuestionAnswering,
"""text-classification""": GPTNeoXForSequenceClassification,
"""text-generation""": GPTNeoXForCausalLM,
"""token-classification""": GPTNeoXForTokenClassification,
"""zero-shot""": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
def __a ( self ) -> Optional[int]:
a : Optional[int] = GPTNeoXModelTester(self )
a : Tuple = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=64 , num_attention_heads=8 )
def __a ( self ) -> List[str]:
self.config_tester.run_common_tests()
def __a ( self ) -> List[str]:
a, a, a, a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> Optional[Any]:
a, a, a, a : List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> Dict:
# This regression test was failing with PyTorch < 1.3
a, a, a, a : str = self.model_tester.prepare_config_and_inputs_for_decoder()
a : Optional[int] = None
self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> List[Any]:
a, a, a, a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> Dict:
a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCAmelCase__ )
def __a ( self ) -> str:
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ )
def __a ( self ) -> List[str]:
a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ )
def __a ( self ) -> int:
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )
@unittest.skip(reason="Feed forward chunking is not implemented" )
def __a ( self ) -> int:
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def __a ( self , lowerCAmelCase__ ) -> List[Any]:
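        # Compares hidden states from a model with default RoPE against one configured
        # with rope_scaling={"type": scaling_type, "factor": 10.0}; "dynamic" scaling
        # should leave short inputs unchanged, while long inputs must differ.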
a, a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
a : List[str] = ids_tensor([1, 10] , config.vocab_size )
a : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
a : str = GPTNeoXModel(lowerCAmelCase__ )
original_model.to(lowerCAmelCase__ )
original_model.eval()
a : str = original_model(lowerCAmelCase__ ).last_hidden_state
a : Dict = original_model(lowerCAmelCase__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
a : int = {"type": scaling_type, "factor": 10.0}
a : Optional[int] = GPTNeoXModel(lowerCAmelCase__ )
scaled_model.to(lowerCAmelCase__ )
scaled_model.eval()
a : Optional[Any] = scaled_model(lowerCAmelCase__ ).last_hidden_state
a : Optional[int] = scaled_model(lowerCAmelCase__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-5 ) )
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
@slow
def __a ( self ) -> List[Any]:
a : Optional[Any] = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped" )
for checkpointing in [True, False]:
a : int = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowerCAmelCase__ )
a : Any = tokenizer("My favorite food is" , return_tensors="pt" ).to(lowerCAmelCase__ )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
a : int = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"
a : Optional[Any] = model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=20 )
a : str = tokenizer.batch_decode(lowerCAmelCase__ )[0]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
| 105 |
class Things:
    """simple docstring"""

    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
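
# A minimal usage sketch (hypothetical menu data):
#   foods = build_menu(["Burger", "Pizza", "Coca Cola"], [80, 100, 60], [40, 10, 20])
#   chosen, total_value = greedy(foods, 60.0, Things.get_value)
# greedy() sorts by key_func in descending order and keeps taking items while
# their summed weight stays within max_cost.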
def test_greedy() -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 338 | 0 |
"""simple docstring"""
import os
import numpy
import onnx
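# Shrinks an ONNX file by finding byte-identical initializer tensors, keeping a
# single copy of each, and rewiring every consumer node (including If/Loop
# subgraphs) to the surviving initializer's name.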
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    new_model_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, new_model_name)
    onnx.save(model, new_model_path)
    return new_model_path
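
# A minimal usage sketch (hypothetical path):
#   optimized_path = remove_dup_initializers("model.onnx")
# The deduplicated model is saved next to the input as "optimized_model.onnx".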
| 106 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abca = [0, 25, 50]
    abcb = [25, 50, 75]
    young = fuzz.membership.trimf(X, abca)
    middle_aged = fuzz.membership.trimf(X, abcb)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 338 | 0 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    """simple docstring"""

    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
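    # The fast tests below run a tiny randomly-initialized OwlViT checkpoint, so
    # only the output structure (not detection quality) is meaningful; real
    # checkpoints are exercised in the @slow tests.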
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)
        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
    def test_small_model_tf(self):
pass
@require_torch
    def test_small_model_pt(self):
a = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
a = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.7_235, "label": "cat", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}},
{"score": 0.7_218, "label": "remote", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}},
{"score": 0.7_184, "label": "couch", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}},
{"score": 0.6_748, "label": "remote", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}},
{"score": 0.6_656, "label": "cat", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}},
{"score": 0.6_614, "label": "couch", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}},
{"score": 0.6_456, "label": "remote", "box": {"xmin": 4_94, "ymin": 1_05, "xmax": 5_21, "ymax": 1_27}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 2_74, "xmax": 93, "ymax": 2_97}},
{"score": 0.6_419, "label": "cat", "box": {"xmin": 4_94, "ymin": 1_05, "xmax": 5_21, "ymax": 1_27}},
] , )
a = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{"score": 0.7_235, "label": "cat", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}},
{"score": 0.7_218, "label": "remote", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}},
{"score": 0.7_184, "label": "couch", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}},
{"score": 0.6_748, "label": "remote", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}},
{"score": 0.6_656, "label": "cat", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}},
{"score": 0.6_614, "label": "couch", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}},
{"score": 0.6_456, "label": "remote", "box": {"xmin": 4_94, "ymin": 1_05, "xmax": 5_21, "ymax": 1_27}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 2_74, "xmax": 93, "ymax": 2_97}},
{"score": 0.6_419, "label": "cat", "box": {"xmin": 4_94, "ymin": 1_05, "xmax": 5_21, "ymax": 1_27}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
a = pipeline("zero-shot-object-detection" )
a = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.2_868, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 3_15, "ymax": 4_72}},
{"score": 0.1_474, "label": "remote", "box": {"xmin": 3_35, "ymin": 74, "xmax": 3_71, "ymax": 1_87}},
{"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_42, "ymax": 4_76}},
] , )
a = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{"score": 0.2_868, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 3_15, "ymax": 4_72}},
{"score": 0.1_474, "label": "remote", "box": {"xmin": 3_35, "ymin": 74, "xmax": 3_71, "ymax": 1_87}},
{"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_42, "ymax": 4_76}},
],
[
{"score": 0.2_868, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 3_15, "ymax": 4_72}},
{"score": 0.1_474, "label": "remote", "box": {"xmin": 3_35, "ymin": 74, "xmax": 3_71, "ymax": 1_87}},
{"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_42, "ymax": 4_76}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
    def test_large_model_tf(self):
pass
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
            ],
        )
@require_torch
@slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
            ],
        )
| 107 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    """simple docstring"""

    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase = pred_prev_sample
lowerCAmelCase = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase = pred_prev_sample
lowerCAmelCase = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler.timesteps
for i, timestep in enumerate(__SCREAMING_SNAKE_CASE ):
if i == len(__SCREAMING_SNAKE_CASE ) - 1:
lowerCAmelCase = -1
else:
lowerCAmelCase = timesteps[i + 1]
lowerCAmelCase = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = prev_t.item()
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = [100, 87, 50, 51, 0]
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = [100, 87, 50, 1, 0]
lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
__SCREAMING_SNAKE_CASE , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
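
# Hedged usage sketch of the custom-timesteps API exercised above (a random
# tensor stands in for a real denoising model's noise prediction):
#     scheduler = DDPMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])
#     sample = torch.randn(1, 3, 8, 8)
#     generator = torch.manual_seed(0)
#     for t in scheduler.timesteps:
#         residual = torch.randn_like(sample)  # placeholder for model(sample, t)
#         sample = scheduler.step(residual, t, sample, generator=generator).prev_sample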
"""simple docstring"""
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase ):
"""simple docstring"""
@register_to_config
def __init__( self , *,
snake_case__ = 4 , snake_case__ = 768 , snake_case__ , snake_case__ , ):
"""simple docstring"""
super().__init__()
lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.zeros(snake_case__ ) )
# parameters for additional clip time embeddings
lowerCAmelCase : Tuple = nn.Linear(snake_case__ , snake_case__ )
lowerCAmelCase : str = nn.Linear(snake_case__ , snake_case__ )
# parameters for encoder hidden states
lowerCAmelCase : Optional[int] = clip_extra_context_tokens
lowerCAmelCase : Union[str, Any] = nn.Linear(
snake_case__ , self.clip_extra_context_tokens * cross_attention_dim )
lowerCAmelCase : List[Any] = nn.Linear(snake_case__ , snake_case__ )
lowerCAmelCase : Tuple = nn.LayerNorm(snake_case__ )
def lowercase__ ( self , *, snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
lowerCAmelCase : Dict = image_embeddings.shape[0]
lowerCAmelCase : Optional[Any] = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
lowerCAmelCase : List[str] = classifier_free_guidance_embeddings.expand(
snake_case__ , -1 )
lowerCAmelCase : str = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
lowerCAmelCase : List[str] = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
lowerCAmelCase : int = self.embedding_proj(snake_case__ )
lowerCAmelCase : Optional[int] = self.clip_image_embeddings_project_to_time_embeddings(snake_case__ )
lowerCAmelCase : Any = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
lowerCAmelCase : List[Any] = self.clip_extra_context_tokens_proj(snake_case__ )
lowerCAmelCase : Any = clip_extra_context_tokens.reshape(snake_case__ , -1 , self.clip_extra_context_tokens )
lowerCAmelCase : List[str] = clip_extra_context_tokens.permute(0 , 2 , 1 )
lowerCAmelCase : Dict = self.encoder_hidden_states_proj(snake_case__ )
lowerCAmelCase : Union[str, Any] = self.text_encoder_hidden_states_norm(snake_case__ )
lowerCAmelCase : Tuple = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
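
# Hedged shape sketch of the projections above, with toy sizes:
#     model = UnCLIPTextProjModel(clip_extra_context_tokens=4, clip_embeddings_dim=32,
#                                 time_embed_dim=64, cross_attention_dim=16)
#     hidden, time_emb = model(image_embeddings=torch.randn(2, 32),
#                              prompt_embeds=torch.randn(2, 32),
#                              text_encoder_hidden_states=torch.randn(2, 7, 32),
#                              do_classifier_free_guidance=False)
#     # hidden: (2, 4 + 7, 16) -- extra context tokens prepended to the projected hidden states
#     # time_emb: (2, 64)      -- added to the timestep embedding downstream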
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)


class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)

        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not"
                    " exist, no preloaded speaker embeddings will be used - Make sure to provide a correct path to"
                    " the json dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub=False,
        **kwargs,
    ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset=None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}`"
                    f" does not exist, no preloaded voice preset will be used - Make sure to provide correct paths"
                    f" to the {voice_preset} embeddings."
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset=None, **kwargs):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
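
# Hedged usage sketch (the checkpoint name is an assumption; any Bark checkpoint
# with a speaker-embeddings json works the same way):
#     processor = BarkProcessor.from_pretrained("suno/bark-small")
#     inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
#     # `inputs` holds the tokenized text plus a "history_prompt" BatchFeature.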
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None
class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")
    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
def bin_to_octal(bin_string: str) -> str:
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # Left-pad with zeros so the length is a multiple of 3.
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    # Each 3-bit group maps to exactly one octal digit.
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
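    # Quick sanity check: 0b10110 == 22 == 0o26.
    print(bin_to_octal("10110"))  # -> 26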
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertEqual(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
        localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
        link_unchanged_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
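
# Hedged usage sketch of the Karras-sigmas path exercised above (requires `torchsde`;
# the random tensor stands in for a real denoising model):
#     scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, noise_sampler_seed=0, use_karras_sigmas=True)
#     scheduler.set_timesteps(10)
#     sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = torch.randn_like(model_input)  # placeholder for model(model_input, t)
#         sample = scheduler.step(noise_pred, t, sample).prev_sample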
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
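
# Usage note (hedged): thanks to the `_LazyModule` indirection above,
# `from transformers.models.m2m_100 import M2M100Config` only imports the
# configuration submodule on first attribute access, keeping `import transformers` fast.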
import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        # B has fewer rows than A, so the row-count check must raise.
        b = np.array([[0, 3], [3, 0]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
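
# Identity exercised by `test_schur_complement` above: for X = [[A, B], [B^T, C]],
#     det(X) = det(A) * det(C - B^T A^{-1} B)
# i.e. the block determinant factors through the Schur complement of A.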
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node_a, node_b, cost in edges:
        adjacency[node_a].append([node_b, cost])
        adjacency[node_b].append([node_a, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
import argparse
import hashlib
import io
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = os.path.join(os.path.expanduser("~"), ".cache", "whisper")) -> bytes:
    # The default cache directory is an assumption (mirrors openai-whisper's default);
    # pass `root` explicitly to override it.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # `_download` returns the raw checkpoint bytes, so deserialize them here.
        model_bytes = _download(_MODELS[checkpoint_path])
        original_checkpoint = torch.load(io.BytesIO(model_bytes), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
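
    # Example invocation (hedged; the script filename and output directory are placeholders):
    #   python convert_openai_to_hf.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny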
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs)
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
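# Usage sketch (assumed call pattern, mirroring the signature above):
#     processor = TvltProcessor(TvltImageProcessor(), TvltFeatureExtractor())
#     inputs = processor(images=video_frames, audio=waveform, sampling_rate=44100)
#     # `inputs` merges the image keys (e.g. "pixel_values") with the audio
#     # features produced by the feature extractor.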
| 338 | 0 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
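# Quick sanity check for the helpers above (values follow from the definitions;
# see also the driver just below):
#     is_digit_cancelling(49, 98)  # True: cancelling the 9s leaves 4/8 == 49/98
#     fraction_list(2)             # ['16/64', '19/95', '26/65', '49/98']
#     solution()                   # 100, since 1/4 * 1/5 * 2/5 * 1/2 == 1/100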
if __name__ == "__main__":
    print(solution()) | 233 | def selection_sort(collection: list) -> list:
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted))
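# Worked trace (illustrative): selection_sort([64, 25, 12, 22, 11])
#     pass 1 -> [11, 25, 12, 22, 64]   (minimum 11 swapped to the front)
#     pass 2 -> [11, 12, 25, 22, 64]
#     pass 3 -> [11, 12, 22, 25, 64]
#     pass 4 -> no swap needed; final result [11, 12, 22, 25, 64]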
| 338 | 0 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
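# Round-trip sketch (assuming the private YAML helpers behave as exercised above):
#     SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})._to_yaml_list()
#     # -> [{"name": "train", "num_bytes": 1337, "num_examples": 42}]  (dataset_name is dropped)
#     # and SplitDict._from_yaml_list(...) rebuilds an equivalent SplitDict.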
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files.
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name | 297 | import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=False, vocab_size=19, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        config = EsmConfig(vocab_size=33, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, is_folding_model=True, esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False})
        return config
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False
    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_main_input_name(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    """simple docstring"""

    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
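# Shape note (an editorial gloss on the asserts above): `positions` is
# (8, batch, seq_len, 14, 3) -- eight stacked structure-module outputs, 14 atoms
# per residue in the atom14 encoding, and xyz coordinates -- while `angles`
# holds 7 torsion angles per residue as (sin, cos) pairs, hence the trailing (7, 2).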
| 338 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
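# Usage sketch with a public checkpoint:
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
#     # -> BatchEncoding with "input_ids", "attention_mask" and "pixel_values"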
| 240 | import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")
        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")
            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 338 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    """simple docstring"""

    model_type = "wav2vec2"
    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, adapter_attn_dim=None, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
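# Example: with the default conv_stride=(5, 2, 2, 2, 2, 2, 2), the property above
# returns 5 * 2**6 = 320, i.e. one encoder frame per 320 waveform samples
# (20 ms of audio at the usual 16 kHz sampling rate).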
| 296 | import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 5_1_2,
'''albert-large-v1''': 5_1_2,
'''albert-xlarge-v1''': 5_1_2,
'''albert-xxlarge-v1''': 5_1_2,
'''albert-base-v2''': 5_1_2,
'''albert-large-v2''': 5_1_2,
'''albert-xlarge-v2''': 5_1_2,
'''albert-xxlarge-v2''': 5_1_2,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs=None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
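# Usage sketch (standard checkpoint name assumed):
#     tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#     tokenizer.tokenize("Hello world!")  # e.g. ['▁hello', '▁world', '!']
# Encoding runs through preprocess_text + the SentencePiece model loaded above,
# and build_inputs_with_special_tokens wraps the ids as [CLS] ... [SEP].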
| 338 | 0 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result2 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
| 13 | import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    """simple docstring"""

    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }
        config.update(**kwargs)
        return config
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ) ->Tuple:
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase , lowerCAmelCase = sample, sample
for t in range(__SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ):
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
lowerCAmelCase = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
pass
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
lowerCAmelCase = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
if scheduler is None:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = 10
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
return sample
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
lowerCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
lowerCAmelCase = scheduler.timesteps[5]
lowerCAmelCase = scheduler.timesteps[6]
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
lowerCAmelCase = DEISMultistepScheduler(**self.get_scheduler_config() )
lowerCAmelCase = self.full_loop(scheduler=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase = self.full_loop(scheduler=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , algorithm_type='''deis''' , solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = self.full_loop(
solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , )
assert not torch.isnan(__SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers"
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE )
self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=__SCREAMING_SNAKE_CASE , time_step=0 )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
lowerCAmelCase = self.full_loop()
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
lowerCAmelCase = self.full_loop(prediction_type='''v_prediction''' )
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(thresholding=__SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 )
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = 10
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
assert sample.dtype == torch.floataa
| 338 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 298 | import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    """simple docstring"""

    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"))
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def test_full_loop_no_plms(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 338 | 0 |
def excel_title_to_column(column_title: str) -> int:
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
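# Worked example: "AB" -> (ord("B") - 64) * 26**0 + (ord("A") - 64) * 26**1
#                       = 2 + 26 = 28, i.e. base-26 with digits 1..26;
# excel_title_to_column("ZY") == 26 * 26 + 25 == 701.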
| 201 | from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS, r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class FillMaskPipeline(Pipeline):
    """simple docstring"""

    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> np.ndarray:
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)
            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)
        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Optional[Any]:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowerCAmelCase = [targets]
try:
lowerCAmelCase = self.tokenizer.get_vocab()
except Exception:
lowerCAmelCase = {}
lowerCAmelCase = []
for target in targets:
lowerCAmelCase = vocab.get(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if id_ is None:
lowerCAmelCase = self.tokenizer(
__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , max_length=1 , truncation=__SCREAMING_SNAKE_CASE , )['''input_ids''']
if len(__SCREAMING_SNAKE_CASE ) == 0:
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
lowerCAmelCase = input_ids[0]
            # XXX: If users hit this code path, tokenization becomes pretty
            # slow, so let's make sure they notice. The warning enables them
            # to fix the input to get faster performance.
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
F"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." )
target_ids.append(id_ )
lowerCAmelCase = list(set(__SCREAMING_SNAKE_CASE ) )
if len(__SCREAMING_SNAKE_CASE ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
return target_ids
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ) ->Dict:
lowerCAmelCase = {}
if targets is not None:
lowerCAmelCase = self.get_target_ids(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = target_ids
if top_k is not None:
lowerCAmelCase = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
lowerCAmelCase = super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) == 1:
return outputs[0]
return outputs
| 338 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase : List[str] = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase : Dict = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
lowerCAmelCase : List[Any] = {
'''gpt2''': 10_24,
'''gpt2-medium''': 10_24,
'''gpt2-large''': 10_24,
'''gpt2-xl''': 10_24,
'''distilgpt2''': 10_24,
}
class _A ( UpperCamelCase_):
SCREAMING_SNAKE_CASE : List[str] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : Any = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE : Tuple = GPTaTokenizer
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="<|endoftext|>" , _SCREAMING_SNAKE_CASE="<|endoftext|>" , _SCREAMING_SNAKE_CASE="<|endoftext|>" , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = kwargs.pop('add_bos_token' , __SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
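        # Rebuild the byte-level pre-tokenizer if the stored `add_prefix_space`
        # flag disagrees with the value requested here.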
if pre_tok_state.get('add_prefix_space' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
SCREAMING_SNAKE_CASE_ : Dict = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE_ : int = add_prefix_space
SCREAMING_SNAKE_CASE_ : List[str] = pre_tok_class(**__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : str = add_prefix_space
def UpperCAmelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = kwargs.get('is_split_into_words' , __SCREAMING_SNAKE_CASE )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.get('is_split_into_words' , __SCREAMING_SNAKE_CASE )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) + [self.eos_token_id] )
if len(__SCREAMING_SNAKE_CASE ) > self.model_max_length:
SCREAMING_SNAKE_CASE_ : Any = input_ids[-self.model_max_length :]
return input_ids
| 253 | from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowercase__ : int = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
lowercase__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 338 | 0 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def snake_case_ ():
raise RuntimeError('''CUDA out of memory.''' )
class _a ( nn.Module ):
def __init__( self : Tuple ):
'''simple docstring'''
super().__init__()
UpperCAmelCase = nn.Linear(3 , 4 )
UpperCAmelCase = nn.BatchNormad(4 )
UpperCAmelCase = nn.Linear(4 , 5 )
def A ( self : str , lowercase : Optional[Any] ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(__SCREAMING_SNAKE_CASE ) ) )
class _a ( unittest.TestCase ):
def A ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase = []
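        # find_executable_batch_size retries the wrapped function, halving the
        # batch size after every simulated CUDA OOM until the call succeeds.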
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowercase : Optional[Any] ):
nonlocal batch_sizes
batch_sizes.append(__SCREAMING_SNAKE_CASE )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(__SCREAMING_SNAKE_CASE , [128, 64, 32, 16, 8] )
def A ( self : Any ):
'''simple docstring'''
UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowercase : Any , lowercase : Any ):
nonlocal batch_sizes
batch_sizes.append(__SCREAMING_SNAKE_CASE )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
UpperCAmelCase , UpperCAmelCase = mock_training_loop_function('''hello''' )
self.assertListEqual(__SCREAMING_SNAKE_CASE , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def A ( self : Tuple ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(lowercase : List[Any] ):
pass
with self.assertRaises(__SCREAMING_SNAKE_CASE ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def A ( self : Tuple ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowercase : Optional[Any] ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(__SCREAMING_SNAKE_CASE ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def A ( self : Union[str, Any] ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowercase : Optional[int] , lowercase : List[str] , lowercase : Union[str, Any] ):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(__SCREAMING_SNAKE_CASE ) as cm:
mock_training_loop_function(128 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def A ( self : List[Any] ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowercase : Optional[int] ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(__SCREAMING_SNAKE_CASE ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def A ( self : Optional[Any] ):
'''simple docstring'''
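        # release_memory should free the model's CUDA allocations, bringing
        # usage back to the baseline measured before the model was moved to GPU.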
UpperCAmelCase = torch.cuda.memory_allocated()
UpperCAmelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , __SCREAMING_SNAKE_CASE )
UpperCAmelCase = release_memory(__SCREAMING_SNAKE_CASE )
self.assertEqual(torch.cuda.memory_allocated() , __SCREAMING_SNAKE_CASE )
| 34 | lowercase__ : Optional[int] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def SCREAMING_SNAKE_CASE_ ( ) -> None:
lowerCAmelCase = input('''Enter message: ''' )
lowerCAmelCase = input('''Enter key [alphanumeric]: ''' )
lowerCAmelCase = input('''Encrypt/Decrypt [e/d]: ''' )
if mode.lower().startswith('''e''' ):
lowerCAmelCase = '''encrypt'''
lowerCAmelCase = encrypt_message(snake_case__ , snake_case__ )
elif mode.lower().startswith('''d''' ):
lowerCAmelCase = '''decrypt'''
lowerCAmelCase = decrypt_message(snake_case__ , snake_case__ )
print(f"\n{mode.title()}ed message:" )
print(snake_case__ )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> str:
return translate_message(snake_case__ , snake_case__ , '''encrypt''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> str:
return translate_message(snake_case__ , snake_case__ , '''decrypt''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> str:
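    # Classic Vigenère shift: move each letter by the alphabet index of the
    # current key letter (forward to encrypt, backward to decrypt); non-letter
    # characters pass through unchanged and do not advance the key.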
lowerCAmelCase = []
lowerCAmelCase = 0
lowerCAmelCase = key.upper()
for symbol in message:
lowerCAmelCase = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(snake_case__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(snake_case__ ):
lowerCAmelCase = 0
else:
translated.append(snake_case__ )
return "".join(snake_case__ )
if __name__ == "__main__":
main()
| 338 | 0 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
_UpperCamelCase = logging.getLogger(__name__)
class lowerCamelCase_ ( UpperCamelCase_ ):
"""simple docstring"""
def _lowercase ( self : int , _a : Tuple , _a : Union[str, Any] , _a : Optional[int]=None , _a : List[str]=None ) -> Optional[Any]:
__lowerCamelCase : int = self.layer[current_layer](__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , head_mask[current_layer] )
__lowerCamelCase : Any = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"""The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , UpperCamelCase_ , )
class lowerCamelCase_ ( UpperCamelCase_ ):
"""simple docstring"""
def __init__( self : int , _a : str ) -> List[Any]:
super().__init__(__SCREAMING_SNAKE_CASE )
__lowerCamelCase : List[str] = BertEncoderWithPabee(__SCREAMING_SNAKE_CASE )
self.init_weights()
__lowerCamelCase : Dict = 0
__lowerCamelCase : str = 0
__lowerCamelCase : Union[str, Any] = 0
__lowerCamelCase : Tuple = 0
def _lowercase ( self : Optional[Any] , _a : Any ) -> Dict:
__lowerCamelCase : Tuple = threshold
def _lowercase ( self : Optional[int] , _a : Optional[Any] ) -> Union[str, Any]:
__lowerCamelCase : Any = patience
def _lowercase ( self : Dict ) -> Union[str, Any]:
__lowerCamelCase : List[Any] = 0
__lowerCamelCase : Tuple = 0
def _lowercase ( self : Tuple ) -> Optional[Any]:
__lowerCamelCase : Tuple = self.inference_layers_num / self.inference_instances_num
__lowerCamelCase : Union[str, Any] = (
f'*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='
f' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'
)
print(__SCREAMING_SNAKE_CASE )
@add_start_docstrings_to_model_forward(__SCREAMING_SNAKE_CASE )
def _lowercase ( self : Optional[int] , _a : Union[str, Any]=None , _a : str=None , _a : str=None , _a : List[str]=None , _a : Dict=None , _a : str=None , _a : Optional[Any]=None , _a : List[str]=None , _a : Tuple=None , _a : str=None , _a : Dict=False , ) -> Tuple:
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
__lowerCamelCase : Any = input_ids.size()
elif inputs_embeds is not None:
__lowerCamelCase : List[Any] = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
__lowerCamelCase : List[Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__lowerCamelCase : Union[str, Any] = torch.ones(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE )
if token_type_ids is None:
__lowerCamelCase : str = torch.zeros(__SCREAMING_SNAKE_CASE , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__lowerCamelCase : Optional[int] = self.get_extended_attention_mask(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase : Tuple = encoder_hidden_states.size()
__lowerCamelCase : str = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__lowerCamelCase : int = torch.ones(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE )
__lowerCamelCase : List[Any] = self.invert_attention_mask(__SCREAMING_SNAKE_CASE )
else:
__lowerCamelCase : str = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__lowerCamelCase : Tuple = self.get_head_mask(__SCREAMING_SNAKE_CASE , self.config.num_hidden_layers )
__lowerCamelCase : str = self.embeddings(
input_ids=__SCREAMING_SNAKE_CASE , position_ids=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , inputs_embeds=__SCREAMING_SNAKE_CASE )
__lowerCamelCase : Any = embedding_output
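        # PABEE: during training a classification head is attached after every
        # layer; at inference either all layers run (patience == 0) or the loop
        # below exits early once predictions stay stable.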
if self.training:
__lowerCamelCase : Optional[Any] = []
for i in range(self.config.num_hidden_layers ):
__lowerCamelCase : Optional[int] = self.encoder.adaptive_forward(
__SCREAMING_SNAKE_CASE , current_layer=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE )
__lowerCamelCase : Dict = self.pooler(__SCREAMING_SNAKE_CASE )
__lowerCamelCase : Optional[int] = output_layers[i](output_dropout(__SCREAMING_SNAKE_CASE ) )
res.append(__SCREAMING_SNAKE_CASE )
elif self.patience == 0: # Use all layers for inference
__lowerCamelCase : Tuple = self.encoder(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , )
__lowerCamelCase : Tuple = self.pooler(encoder_outputs[0] )
__lowerCamelCase : Tuple = [output_layers[self.config.num_hidden_layers - 1](__SCREAMING_SNAKE_CASE )]
else:
__lowerCamelCase : Optional[int] = 0
__lowerCamelCase : Dict = None
__lowerCamelCase : List[Any] = 0
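            # Run the encoder layer by layer, counting how many consecutive
            # layers agree on the prediction; stop once `patience` is reached.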
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__lowerCamelCase : Tuple = self.encoder.adaptive_forward(
__SCREAMING_SNAKE_CASE , current_layer=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE )
__lowerCamelCase : str = self.pooler(__SCREAMING_SNAKE_CASE )
__lowerCamelCase : Dict = output_layers[i](__SCREAMING_SNAKE_CASE )
if regression:
__lowerCamelCase : Tuple = logits.detach()
if patient_result is not None:
__lowerCamelCase : int = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__lowerCamelCase : str = 0
else:
__lowerCamelCase : Optional[int] = logits.detach().argmax(dim=1 )
if patient_result is not None:
__lowerCamelCase : Any = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(__SCREAMING_SNAKE_CASE ) ):
patient_counter += 1
else:
__lowerCamelCase : Dict = 0
__lowerCamelCase : Tuple = logits
if patient_counter == self.patience:
break
__lowerCamelCase : Optional[int] = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"""Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """ , UpperCamelCase_ , )
class lowerCamelCase_ ( UpperCamelCase_ ):
"""simple docstring"""
def __init__( self : str , _a : str ) -> Union[str, Any]:
super().__init__(__SCREAMING_SNAKE_CASE )
__lowerCamelCase : Tuple = config.num_labels
__lowerCamelCase : Optional[int] = BertModelWithPabee(__SCREAMING_SNAKE_CASE )
__lowerCamelCase : str = nn.Dropout(config.hidden_dropout_prob )
__lowerCamelCase : str = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(__SCREAMING_SNAKE_CASE )
def _lowercase ( self : int , _a : Dict=None , _a : Any=None , _a : Dict=None , _a : str=None , _a : Union[str, Any]=None , _a : Any=None , _a : str=None , ) -> Union[str, Any]:
__lowerCamelCase : Any = self.bert(
input_ids=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , position_ids=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , inputs_embeds=__SCREAMING_SNAKE_CASE , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__lowerCamelCase : Dict = (logits[-1],)
if labels is not None:
__lowerCamelCase : Union[str, Any] = None
__lowerCamelCase : Union[str, Any] = 0
for ix, logits_item in enumerate(__SCREAMING_SNAKE_CASE ):
if self.num_labels == 1:
# We are doing regression
__lowerCamelCase : Optional[Any] = MSELoss()
__lowerCamelCase : List[str] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__lowerCamelCase : Optional[int] = CrossEntropyLoss()
__lowerCamelCase : Optional[Any] = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__lowerCamelCase : int = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__lowerCamelCase : Dict = (total_loss / total_weights,) + outputs
return outputs
| 208 | from collections import defaultdict
from math import ceil, sqrt
def SCREAMING_SNAKE_CASE_ ( snake_case__ = 1_0_0_0_0_0_0 , snake_case__ = 1_0 ) -> int:
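    # Counts tile totals t <= t_limit that can form between 1 and 10 distinct
    # hollow square laminae (this appears to be Project Euler problem 174).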
lowerCAmelCase = defaultdict(snake_case__ )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
lowerCAmelCase = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
lowerCAmelCase = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(snake_case__ , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 1_0 )
if __name__ == "__main__":
print(f'{solution() = }')
| 338 | 0 |
'''simple docstring'''
def lowercase_ ( _lowercase ) -> str:
'''simple docstring'''
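    # Convert an integer to a binary string with a '0b' prefix ('-0b' for
    # negatives), building the bits from least to most significant.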
if isinstance(snake_case__ , snake_case__ ):
raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
if isinstance(snake_case__ , snake_case__ ):
raise TypeError('''\'str\' object cannot be interpreted as an integer''' )
if num == 0:
return "0b0"
lowerCamelCase_ : Optional[Any] = False
if num < 0:
lowerCamelCase_ : str = True
lowerCamelCase_ : Dict = -num
lowerCamelCase_ : Union[str, Any] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(snake_case__ ) for e in binary )
return "0b" + "".join(str(snake_case__ ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318 | import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> Union[str, Any]:
assert isinstance(snake_case__ , snake_case__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Union[str, Any]:
lowerCAmelCase = tmp_path / '''cache'''
lowerCAmelCase = {'''text''': '''string'''}
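    # keep_in_memory=True copies the Arrow table into RAM, so allocated Arrow
    # memory must grow; otherwise the file is memory-mapped and usage stays flat.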
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_text_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[Any]:
lowerCAmelCase = tmp_path / '''cache'''
lowerCAmelCase = {'''text''': '''string'''}
lowerCAmelCase = features.copy() if features else default_expected_features
lowerCAmelCase = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase = TextDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_text_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> List[str]:
lowerCAmelCase = tmp_path / '''cache'''
lowerCAmelCase = {'''text''': '''string'''}
lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read()
_check_text_dataset(snake_case__ , snake_case__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
if issubclass(snake_case__ , snake_case__ ):
lowerCAmelCase = text_path
elif issubclass(snake_case__ , snake_case__ ):
lowerCAmelCase = [text_path]
lowerCAmelCase = tmp_path / '''cache'''
lowerCAmelCase = {'''text''': '''string'''}
lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_text_dataset(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__=("train",) ) -> Optional[Any]:
assert isinstance(snake_case__ , snake_case__ )
for split in splits:
lowerCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[Any]:
lowerCAmelCase = tmp_path / '''cache'''
lowerCAmelCase = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase = TextDatasetReader({'''train''': text_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_text_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> List[Any]:
lowerCAmelCase = tmp_path / '''cache'''
    # The text loader reads each line into a single "text" column, so the default dtype is "string"
lowerCAmelCase = {'''text''': '''string'''}
lowerCAmelCase = features.copy() if features else default_expected_features
lowerCAmelCase = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase = TextDatasetReader({'''train''': text_path} , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_text_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Any:
if split:
lowerCAmelCase = {split: text_path}
else:
lowerCAmelCase = '''train'''
lowerCAmelCase = {'''train''': text_path, '''test''': text_path}
lowerCAmelCase = tmp_path / '''cache'''
lowerCAmelCase = {'''text''': '''string'''}
lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_text_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 338 | 0 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
lowerCamelCase : Tuple = (3, 9, -11, 0, 7, 5, 1, -1)
lowerCamelCase : Union[str, Any] = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class lowerCAmelCase :
'''simple docstring'''
_A : int
_A : Node | None
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : int , __a : Optional[int] ) -> None:
"""simple docstring"""
__lowercase : List[Any] = None
for i in sorted(__SCREAMING_SNAKE_CASE , reverse=__SCREAMING_SNAKE_CASE ):
__lowercase : Union[str, Any] = Node(__SCREAMING_SNAKE_CASE , self.head )
def __iter__( self : str ) -> Iterator[int]:
"""simple docstring"""
__lowercase : str = self.head
while node:
yield node.data
__lowercase : List[Any] = node.next_node
def __len__( self : Optional[int] ) -> int:
"""simple docstring"""
return sum(1 for _ in self )
def __str__( self : Any ) -> str:
"""simple docstring"""
return " -> ".join([str(__SCREAMING_SNAKE_CASE ) for node in self] )
def snake_case_ ( lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any ):
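    # Merging works by concatenating both inputs and letting SortedLinkedList
    # re-sort the combined values on construction.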
return SortedLinkedList(list(snake_case__ ) + list(snake_case__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Dict = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even))) | 233 | def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> str:
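    # Same decimal-to-binary conversion as above: validate the argument type,
    # then build the bit string and prefix it with '0b' (or '-0b' when negative).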
if isinstance(snake_case__ , snake_case__ ):
raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
if isinstance(snake_case__ , snake_case__ ):
raise TypeError('''\'str\' object cannot be interpreted as an integer''' )
if num == 0:
return "0b0"
lowerCAmelCase = False
if num < 0:
lowerCAmelCase = True
lowerCAmelCase = -num
lowerCAmelCase = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(snake_case__ ) for e in binary )
return "0b" + "".join(str(snake_case__ ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 338 | 0 |
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowerCAmelCase: List[Any] = 2
class a__:
def __init__( self : Tuple , *, # begin keyword-only arguments
__snake_case : Any="<s>" , __snake_case : str="<pad>" , __snake_case : List[str]="</s>" , __snake_case : str="<unk>" , __snake_case : List[str]=None , ):
a , a , a , a : List[str] = bos, unk, pad, eos
a : Union[str, Any] = []
a : List[Any] = []
a : str = {}
a : List[str] = self.add_symbol(__SCREAMING_SNAKE_CASE )
a : List[Any] = self.add_symbol(__SCREAMING_SNAKE_CASE )
a : Any = self.add_symbol(__SCREAMING_SNAKE_CASE )
a : Dict = self.add_symbol(__SCREAMING_SNAKE_CASE )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(__SCREAMING_SNAKE_CASE )
a : Dict = len(self.symbols )
def __eq__( self : str , __snake_case : List[Any] ):
return self.indices == other.indices
def __getitem__( self : Optional[Any] , __snake_case : List[Any] ):
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : Optional[int] ):
return len(self.symbols )
def __contains__( self : Dict , __snake_case : Any ):
return sym in self.indices
@classmethod
def lowercase_ ( cls : List[Any] , __snake_case : List[str] ):
a : Optional[Any] = cls()
d.add_from_file(__SCREAMING_SNAKE_CASE )
return d
def lowercase_ ( self : Optional[int] , __snake_case : Optional[int] , __snake_case : int=1 , __snake_case : Optional[int]=False ):
if word in self.indices and not overwrite:
a : Union[str, Any] = self.indices[word]
a : List[str] = self.count[idx] + n
return idx
else:
a : int = len(self.symbols )
a : Any = idx
self.symbols.append(__SCREAMING_SNAKE_CASE )
self.count.append(__SCREAMING_SNAKE_CASE )
return idx
def lowercase_ ( self : Optional[Any] , __snake_case : str ):
return 0
def lowercase_ ( self : List[Any] , __snake_case : Union[str, Any] ):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
try:
with open(__SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(__SCREAMING_SNAKE_CASE )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(__SCREAMING_SNAKE_CASE ) )
return
a : str = f.readlines()
a : List[str] = self._load_meta(__SCREAMING_SNAKE_CASE )
for line in lines[indices_start_line:]:
try:
a , a : Optional[Any] = line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
a : Optional[int] = True
a , a : int = line.rsplit(' ' , 1 )
else:
a : str = False
a : Union[str, Any] = int(__SCREAMING_SNAKE_CASE )
a : Optional[int] = line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(__SCREAMING_SNAKE_CASE ) )
self.add_symbol(__SCREAMING_SNAKE_CASE , n=__SCREAMING_SNAKE_CASE , overwrite=__SCREAMING_SNAKE_CASE )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def lowerCamelCase__ ( _A ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
a : Dict = dict((re.sub(r'@@$' , '' , snake_case__ ), v) if k.endswith('@@' ) else (re.sub(r'$' , '</w>' , snake_case__ ), v) for k, v in d.items() )
a : Dict = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f"""{k}</w>"""]
a : List[Any] = d[k] # restore
return da
def lowerCamelCase__ ( _A , _A ):
# prep
if not os.path.exists(snake_case__ ):
raise ValueError(f"""path {biogpt_checkpoint_path} does not exist!""" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
print(f"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
a : Optional[int] = os.path.join(snake_case__ , 'checkpoint.pt' )
if not os.path.isfile(snake_case__ ):
raise ValueError(f"""path to the file {checkpoint_file} does not exist!""" )
a : Optional[Any] = torch.load(snake_case__ , map_location='cpu' )
a : List[str] = chkpt['cfg']['model']
# dicts
a : Any = os.path.join(snake_case__ , 'dict.txt' )
if not os.path.isfile(snake_case__ ):
raise ValueError(f"""path to the file {dict_file} does not exist!""" )
a : List[Any] = Dictionary.load(snake_case__ )
a : Union[str, Any] = rewrite_dict_keys(src_dict.indices )
a : List[str] = len(snake_case__ )
a : int = os.path.join(snake_case__ , VOCAB_FILES_NAMES['vocab_file'] )
print(f"""Generating {src_vocab_file} of {src_vocab_size} records""" )
with open(snake_case__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(snake_case__ , ensure_ascii=snake_case__ , indent=snake_case__ ) )
# merges_file (bpecodes)
a : str = os.path.join(snake_case__ , 'bpecodes' )
if not os.path.isfile(snake_case__ ):
raise ValueError(f"""path to the file {bpecodes_file} does not exist!""" )
a : str = os.path.join(snake_case__ , VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(snake_case__ , snake_case__ )
# model config
a : Tuple = os.path.join(snake_case__ , 'config.json' )
a : List[str] = {
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1E-12,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(f"""Generating {biogpt_model_config_file}""" )
with open(snake_case__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(snake_case__ , ensure_ascii=snake_case__ , indent=snake_case__ ) )
# tokenizer config
a : Union[str, Any] = os.path.join(snake_case__ , snake_case__ )
a : Any = {
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 1024,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(f"""Generating {biogpt_tokenizer_config_file}""" )
with open(snake_case__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(snake_case__ , ensure_ascii=snake_case__ , indent=snake_case__ ) )
# model
a : str = chkpt['model']
# remove unneeded keys
a : Any = [
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(snake_case__ , snake_case__ )
a : Dict = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
a : Any = model_state_dict.pop(snake_case__ )
else:
a : Any = model_state_dict.pop(snake_case__ )
a : Any = BioGptConfig.from_pretrained(snake_case__ )
a : Optional[Any] = BioGptForCausalLM(snake_case__ )
# check that it loads ok
model_new.load_state_dict(snake_case__ )
# save
a : Optional[Any] = os.path.join(snake_case__ , snake_case__ )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(snake_case__ , snake_case__ )
print('Conversion is done!' )
if __name__ == "__main__":
lowerCAmelCase: Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase: Union[str, Any] = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path) | 297 | class lowercase_ :
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Any:
lowerCAmelCase = name
lowerCAmelCase = value
lowerCAmelCase = weight
def __repr__( self ) ->str:
return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
return self.value
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
return self.name
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
return self.weight
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
return self.value / self.weight
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> int:
lowerCAmelCase = []
for i in range(len(snake_case__ ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
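    # Greedy knapsack: sort the items by the supplied key (typically the
    # value/weight ratio) and take each item that still fits within max_cost.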
lowerCAmelCase = sorted(snake_case__ , key=snake_case__ , reverse=snake_case__ )
lowerCAmelCase = []
lowerCAmelCase , lowerCAmelCase = 0.0, 0.0
for i in range(len(snake_case__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 338 | 0 |
def __lowercase ( __lowerCAmelCase : Optional[Any] ):
if a < 0:
raise ValueError('Input value must be a positive integer' )
elif isinstance(snake_case__ , snake_case__ ):
raise TypeError('Input value must be a \'int\' type' )
return bin(snake_case__ ).count('1' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 240 | import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
lowercase__ : Dict = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
lowercase__ : Optional[int] = [0, 2_5, 5_0]
lowercase__ : Union[str, Any] = [2_5, 5_0, 7_5]
lowercase__ : int = fuzz.membership.trimf(X, abca)
lowercase__ : Tuple = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
lowercase__ : List[str] = np.ones(7_5)
lowercase__ : Any = np.zeros((7_5,))
# 1. Union = max(µA(x), µB(x))
lowercase__ : Union[str, Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
lowercase__ : int = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
lowercase__ : Union[str, Any] = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
lowercase__ : Optional[int] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
lowercase__ : Any = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
lowercase__ : str = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
lowercase__ : Tuple = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
lowercase__ : Tuple = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 338 | 0 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
SCREAMING_SNAKE_CASE_ = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class UpperCamelCase__ :
'''simple docstring'''
__snake_case : str = PegasusConfig
__snake_case : List[Any] = {}
__snake_case : Optional[Any] = """gelu"""
def __init__( self : str ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Dict=13 ,lowerCamelCase__ : Union[str, Any]=7 ,lowerCamelCase__ : Tuple=True ,lowerCamelCase__ : Tuple=False ,lowerCamelCase__ : str=99 ,lowerCamelCase__ : Tuple=32 ,lowerCamelCase__ : List[Any]=5 ,lowerCamelCase__ : List[Any]=4 ,lowerCamelCase__ : List[str]=37 ,lowerCamelCase__ : Dict=0.1 ,lowerCamelCase__ : List[Any]=0.1 ,lowerCamelCase__ : List[Any]=20 ,lowerCamelCase__ : str=2 ,lowerCamelCase__ : Any=1 ,lowerCamelCase__ : int=0 ,) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = eos_token_id
SCREAMING_SNAKE_CASE = pad_token_id
SCREAMING_SNAKE_CASE = bos_token_id
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ).clip(3 ,self.vocab_size )
SCREAMING_SNAKE_CASE = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) ,1 )
SCREAMING_SNAKE_CASE = np.concatenate([input_ids, eos_tensor] ,axis=1 )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
SCREAMING_SNAKE_CASE = prepare_pegasus_inputs_dict(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Tuple ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 20
SCREAMING_SNAKE_CASE = model_class_name(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = model.encode(inputs_dict["""input_ids"""] )
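        # Decode all tokens but the last with a freshly initialized cache, then
        # feed only the final token through the cached path; the result must
        # match a full uncached decode.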
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
SCREAMING_SNAKE_CASE = model.init_cache(decoder_input_ids.shape[0] ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) ,dtype="""i4""" )
SCREAMING_SNAKE_CASE = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,)
SCREAMING_SNAKE_CASE = model.decode(
decoder_input_ids[:, :-1] ,__SCREAMING_SNAKE_CASE ,decoder_attention_mask=__SCREAMING_SNAKE_CASE ,past_key_values=__SCREAMING_SNAKE_CASE ,decoder_position_ids=__SCREAMING_SNAKE_CASE ,)
SCREAMING_SNAKE_CASE = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="""i4""" )
SCREAMING_SNAKE_CASE = model.decode(
decoder_input_ids[:, -1:] ,__SCREAMING_SNAKE_CASE ,decoder_attention_mask=__SCREAMING_SNAKE_CASE ,past_key_values=outputs_cache.past_key_values ,decoder_position_ids=__SCREAMING_SNAKE_CASE ,)
SCREAMING_SNAKE_CASE = model.decode(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 ,msg=F"""Max diff is {diff}""" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 20
SCREAMING_SNAKE_CASE = model_class_name(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = model.encode(inputs_dict["""input_ids"""] )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
SCREAMING_SNAKE_CASE = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] ,axis=-1 ,)
SCREAMING_SNAKE_CASE = model.init_cache(decoder_input_ids.shape[0] ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,)
SCREAMING_SNAKE_CASE = model.decode(
decoder_input_ids[:, :-1] ,__SCREAMING_SNAKE_CASE ,decoder_attention_mask=__SCREAMING_SNAKE_CASE ,past_key_values=__SCREAMING_SNAKE_CASE ,decoder_position_ids=__SCREAMING_SNAKE_CASE ,)
SCREAMING_SNAKE_CASE = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="""i4""" )
SCREAMING_SNAKE_CASE = model.decode(
decoder_input_ids[:, -1:] ,__SCREAMING_SNAKE_CASE ,past_key_values=outputs_cache.past_key_values ,decoder_attention_mask=__SCREAMING_SNAKE_CASE ,decoder_position_ids=__SCREAMING_SNAKE_CASE ,)
SCREAMING_SNAKE_CASE = model.decode(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,decoder_attention_mask=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 ,msg=F"""Max diff is {diff}""" )
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> Optional[int]:
'''simple docstring'''
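    # Derive attention masks from the pad token when none are supplied; the
    # decoder mask always lets the first (start) position be attended to.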
if attention_mask is None:
SCREAMING_SNAKE_CASE = np.not_equal(snake_case__ , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class UpperCamelCase__ ( UpperCamelCase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case : Optional[int] = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
__snake_case : int = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
__snake_case : int = True
__snake_case : str = False
__snake_case : Dict = False
__snake_case : Tuple = False
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = FlaxPegasusModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE )
@jax.jit
def encode_jitted(lowerCamelCase__ : Dict ,lowerCamelCase__ : List[str]=None ,**lowerCamelCase__ : Dict ):
return model.encode(input_ids=__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE )
with self.subTest("""JIT Enabled""" ):
SCREAMING_SNAKE_CASE = encode_jitted(**__SCREAMING_SNAKE_CASE ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE = encode_jitted(**__SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) ,len(__SCREAMING_SNAKE_CASE ) )
for jitted_output, output in zip(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ):
self.assertEqual(jitted_output.shape ,output.shape )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = model.encode(inputs_dict["""input_ids"""] ,inputs_dict["""attention_mask"""] )
SCREAMING_SNAKE_CASE = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCamelCase__ : int ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Any ):
return model.decode(
decoder_input_ids=__SCREAMING_SNAKE_CASE ,decoder_attention_mask=__SCREAMING_SNAKE_CASE ,encoder_outputs=__SCREAMING_SNAKE_CASE ,)
with self.subTest("""JIT Enabled""" ):
SCREAMING_SNAKE_CASE = decode_jitted(**__SCREAMING_SNAKE_CASE ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE = decode_jitted(**__SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) ,len(__SCREAMING_SNAKE_CASE ) )
for jitted_output, output in zip(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ):
self.assertEqual(jitted_output.shape ,output.shape )
@slow
def SCREAMING_SNAKE_CASE__ ( self : int ) -> int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""google/pegasus-large""" ,from_pt=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = np.ones((1, 1) )
SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@slow
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
SCREAMING_SNAKE_CASE = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
SCREAMING_SNAKE_CASE = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning \'Oh I think you\'re nominated\'\", said Dappy.\"And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around.\"At the end of the day we\'re grateful to be where we are in our careers.\"If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
SCREAMING_SNAKE_CASE = [
"""California\'s largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.""",
]
SCREAMING_SNAKE_CASE = tokenizer(__SCREAMING_SNAKE_CASE ,return_tensors="""np""" ,truncation=__SCREAMING_SNAKE_CASE ,max_length=512 ,padding=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = model.generate(**__SCREAMING_SNAKE_CASE ,num_beams=2 ).sequences
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE ,skip_special_tokens=__SCREAMING_SNAKE_CASE )
assert tgt_text == decoded
| 296 | import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = (DDPMScheduler,)
def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->Optional[Any]:
lowerCAmelCase = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**__SCREAMING_SNAKE_CASE )
return config
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase = pred_prev_sample
lowerCAmelCase = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase = pred_prev_sample
lowerCAmelCase = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler.timesteps
for i, timestep in enumerate(__SCREAMING_SNAKE_CASE ):
if i == len(__SCREAMING_SNAKE_CASE ) - 1:
lowerCAmelCase = -1
else:
lowerCAmelCase = timesteps[i + 1]
lowerCAmelCase = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = prev_t.item()
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = [100, 87, 50, 51, 0]
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = [100, 87, 50, 1, 0]
lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
__SCREAMING_SNAKE_CASE , msg=F'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
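# A minimal usage sketch of the custom-timesteps API exercised by the tests
# above; the schedule values are arbitrary and must be strictly descending.
def _custom_timesteps_demo():
    scheduler = DDPMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])
    return scheduler.timesteps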
| 338 | 0 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __lowercase ( UpperCamelCase_ ):
"""simple docstring"""
def __init__( self : Optional[int] , *lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : str=None , **lowerCAmelCase__ : Any):
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
SCREAMING_SNAKE_CASE_: Tuple = eval_examples
SCREAMING_SNAKE_CASE_: Optional[int] = post_process_function
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : str=None , lowerCAmelCase__ : str=None , lowerCAmelCase__ : Dict = "eval"):
SCREAMING_SNAKE_CASE_: List[str] = self.eval_dataset if eval_dataset is None else eval_dataset
SCREAMING_SNAKE_CASE_: List[str] = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE)
SCREAMING_SNAKE_CASE_: List[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE_: Tuple = self.compute_metrics
SCREAMING_SNAKE_CASE_: Tuple = None
SCREAMING_SNAKE_CASE_: Any = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
SCREAMING_SNAKE_CASE_: List[Any] = time.time()
try:
SCREAMING_SNAKE_CASE_: List[str] = eval_loop(
__SCREAMING_SNAKE_CASE , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
finally:
SCREAMING_SNAKE_CASE_: List[str] = compute_metrics
SCREAMING_SNAKE_CASE_: List[Any] = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node writes the results by default
SCREAMING_SNAKE_CASE_: Tuple = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , output.predictions)
SCREAMING_SNAKE_CASE_: Any = self.compute_metrics(__SCREAMING_SNAKE_CASE)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
SCREAMING_SNAKE_CASE_: Dict = metrics.pop(__SCREAMING_SNAKE_CASE)
metrics.update(output.metrics)
else:
SCREAMING_SNAKE_CASE_: Optional[int] = output.metrics
if self.args.should_log:
# Only the main node logs the results by default
self.log(__SCREAMING_SNAKE_CASE)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __SCREAMING_SNAKE_CASE)
return metrics
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : Union[str, Any] = "test"):
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.get_test_dataloader(__SCREAMING_SNAKE_CASE)
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE_: List[Any] = self.compute_metrics
SCREAMING_SNAKE_CASE_: Dict = None
SCREAMING_SNAKE_CASE_: str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
SCREAMING_SNAKE_CASE_: List[Any] = time.time()
try:
SCREAMING_SNAKE_CASE_: Union[str, Any] = eval_loop(
__SCREAMING_SNAKE_CASE , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
finally:
SCREAMING_SNAKE_CASE_: List[Any] = compute_metrics
SCREAMING_SNAKE_CASE_: Any = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is None or self.compute_metrics is None:
return output
SCREAMING_SNAKE_CASE_: List[Any] = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , output.predictions , "predict")
SCREAMING_SNAKE_CASE_: Dict = self.compute_metrics(__SCREAMING_SNAKE_CASE)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
SCREAMING_SNAKE_CASE_: Dict = metrics.pop(__SCREAMING_SNAKE_CASE)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__SCREAMING_SNAKE_CASE)
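# A minimal construction sketch for the Trainer subclass above, assuming the
# original keyword names `eval_examples` and `post_process_function`; every
# argument below is a placeholder built by the surrounding QA script.
def _make_qa_trainer(model, training_args, train_ds, eval_ds, eval_examples, post_fn, metric_fn):
    return __lowercase(
        model=model,
        args=training_args,
        train_dataset=train_ds,
        eval_dataset=eval_ds,
        eval_examples=eval_examples,
        post_process_function=post_fn,
        compute_metrics=metric_fn,
    )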
| 13 | import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowercase__ : str = logging.get_logger(__name__)
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : Any = """AutoTokenizer"""
UpperCAmelCase_ : Optional[int] = ["""tokenizer"""]
UpperCAmelCase_ : str = {
"""semantic_prompt""": 1,
"""coarse_prompt""": 2,
"""fine_prompt""": 2,
}
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Optional[Any]:
super().__init__(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = speaker_embeddings
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , **__SCREAMING_SNAKE_CASE ) ->Tuple:
if speaker_embeddings_dict_path is not None:
lowerCAmelCase = get_file_from_repo(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , subfolder=kwargs.pop('''subfolder''' , __SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop('''cache_dir''' , __SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop('''force_download''' , __SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop('''proxies''' , __SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop('''resume_download''' , __SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop('''local_files_only''' , __SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop('''use_auth_token''' , __SCREAMING_SNAKE_CASE ) , revision=kwargs.pop('''revision''' , __SCREAMING_SNAKE_CASE ) , )
if speaker_embeddings_path is None:
logger.warning(
F"`{os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`." )
lowerCAmelCase = None
else:
with open(__SCREAMING_SNAKE_CASE ) as speaker_embeddings_json:
lowerCAmelCase = json.load(__SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase = None
lowerCAmelCase = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
return cls(tokenizer=__SCREAMING_SNAKE_CASE , speaker_embeddings=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , __SCREAMING_SNAKE_CASE="speaker_embeddings" , __SCREAMING_SNAKE_CASE = False , **__SCREAMING_SNAKE_CASE , ) ->int:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''v2''' ) , exist_ok=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = {}
lowerCAmelCase = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
lowerCAmelCase = self._load_voice_preset(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['''repo_or_path'''] , __SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = os.path.join(__SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}.npy" )
lowerCAmelCase = tmp_dict
with open(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , '''w''' ) as fp:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
super().save_pretrained(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE ) ->List[str]:
lowerCAmelCase = self.speaker_embeddings[voice_preset]
lowerCAmelCase = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." )
lowerCAmelCase = get_file_from_repo(
self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , __SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop('''cache_dir''' , __SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop('''force_download''' , __SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop('''proxies''' , __SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop('''resume_download''' , __SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop('''local_files_only''' , __SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop('''use_auth_token''' , __SCREAMING_SNAKE_CASE ) , revision=kwargs.pop('''revision''' , __SCREAMING_SNAKE_CASE ) , )
if path is None:
raise ValueError(
F"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." )
lowerCAmelCase = np.load(__SCREAMING_SNAKE_CASE )
return voice_preset_dict
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE = None ) ->Tuple:
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"Voice preset unrecognized, missing {key} as a key." )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="pt" , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ) ->int:
if voice_preset is not None and not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
if (
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
lowerCAmelCase = self._load_voice_preset(__SCREAMING_SNAKE_CASE )
else:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not voice_preset.endswith('''.npz''' ):
lowerCAmelCase = voice_preset + '''.npz'''
lowerCAmelCase = np.load(__SCREAMING_SNAKE_CASE )
if voice_preset is not None:
self._validate_voice_preset_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowerCAmelCase = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.tokenizer(
__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
if voice_preset is not None:
lowerCAmelCase = voice_preset
return encoded_text
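# A minimal usage sketch, assuming the upstream `transformers.BarkProcessor`
# API that the class above corresponds to; the checkpoint and voice-preset
# names are illustrative, not mandated by this module.
def _bark_processor_demo():
    from transformers import BarkProcessor
    processor = BarkProcessor.from_pretrained("suno/bark-small")
    return processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")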
| 338 | 0 |
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(snake_case__ , int(b / 2 ) ) * actual_power(snake_case__ , int(b / 2 ) )
else:
return a * actual_power(snake_case__ , int(b / 2 ) ) * actual_power(snake_case__ , int(b / 2 ) )
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
if b < 0:
return 1 / actual_power(snake_case__ , snake_case__ )
return actual_power(snake_case__ , snake_case__ )
if __name__ == "__main__":
print(power(-2, -3))
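# Worked trace of the recursion above (int(b / 2) truncates toward zero, so
# the recursion also terminates for negative exponents):
#   power(2, -3) = 1 / actual_power(2, -3)
#   actual_power(2, -3) = 2 * actual_power(2, -1) ** 2 = 2 * 2 ** 2 = 8
# An equivalent iterative exponentiation-by-squaring sketch, shown for
# comparison only:
def iterative_power(a: float, b: int) -> float:
    exponent = abs(b)
    result = 1.0
    base = a
    while exponent:
        if exponent & 1:  # odd exponent: fold the current base into the result
            result *= base
        base *= base  # square the base, halve the remaining exponent
        exponent >>= 1
    return result if b >= 0 else 1 / result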
| 298 | import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
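# A minimal sketch of the supported replacement path; the checkpoint name
# below is an assumption, for illustration only.
def _load_inpaint_pipeline(checkpoint="runwayml/stable-diffusion-inpainting"):
    return StableDiffusionInpaintPipeline.from_pretrained(checkpoint)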
| 338 | 0 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase__ ( UpperCamelCase_ , unittest.TestCase ):
'''simple docstring'''
a : Union[str, Any] = BlenderbotSmallTokenizer
a : Tuple = False
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
UpperCamelCase__ : Tuple = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
UpperCamelCase__ : Tuple = dict(zip(__SCREAMING_SNAKE_CASE, range(len(__SCREAMING_SNAKE_CASE ) ) ) )
UpperCamelCase__ : str = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
UpperCamelCase__ : Optional[Any] = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
UpperCamelCase__ : Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCamelCase__ : Dict = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def UpperCamelCase__ ( self, **__magic_name__ ) -> List[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( self, __magic_name__ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : Tuple = '''adapt act apte'''
UpperCamelCase__ : List[str] = '''adapt act apte'''
return input_text, output_text
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
UpperCamelCase__ : Any = '''adapt act apte'''
UpperCamelCase__ : int = ['''adapt''', '''act''', '''ap@@''', '''te''']
UpperCamelCase__ : Union[str, Any] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
UpperCamelCase__ : Union[str, Any] = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ), __SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [1384]
UpperCamelCase__ : Optional[int] = '''I am a small frog.'''
UpperCamelCase__ : int = tok([src_text], padding=__SCREAMING_SNAKE_CASE, truncation=__SCREAMING_SNAKE_CASE )['''input_ids''']
UpperCamelCase__ : Tuple = tok.batch_decode(__SCREAMING_SNAKE_CASE, skip_special_tokens=__SCREAMING_SNAKE_CASE, clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : str = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
UpperCamelCase__ : int = '''I am a small frog .'''
UpperCamelCase__ : Union[str, Any] = '''.'''
UpperCamelCase__ : Union[str, Any] = tok(__SCREAMING_SNAKE_CASE )['''input_ids''']
UpperCamelCase__ : Optional[int] = tok(__SCREAMING_SNAKE_CASE )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 201 | import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowercase__ : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
lowercase__ : Dict = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
lowerCAmelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''' ) )
lowerCAmelCase = self.transformer_dir
shutil.copy(
os.path.join(__SCREAMING_SNAKE_CASE , '''src/transformers/models/bert/modeling_bert.py''' ) , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''' ) , )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
lowerCAmelCase = '''src/transformers'''
shutil.rmtree(self.transformer_dir )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Union[str, Any]:
lowerCAmelCase = comment + F"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
lowerCAmelCase = comment + F"\nclass {class_name}(nn.Module):\n" + overwrite_result
lowerCAmelCase = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 )
lowerCAmelCase = black.format_str(__SCREAMING_SNAKE_CASE , mode=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = os.path.join(self.transformer_dir , '''new_code.py''' )
with open(__SCREAMING_SNAKE_CASE , '''w''' , newline='''\n''' ) as f:
f.write(__SCREAMING_SNAKE_CASE )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__SCREAMING_SNAKE_CASE ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=__SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE , '''r''' ) as f:
self.assertEqual(f.read() , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
lowerCAmelCase = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
# Base copy consistency
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , __SCREAMING_SNAKE_CASE , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , '''TestModel''' , __SCREAMING_SNAKE_CASE ) , )
# Copy consistency with a really long name
lowerCAmelCase = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" , F"{long_class_name}LMPredictionHead" , re.sub('''Bert''' , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , __SCREAMING_SNAKE_CASE , overwrite_result=re.sub('''Bert''' , '''TestModel''' , __SCREAMING_SNAKE_CASE ) , )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
lowerCAmelCase = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
lowerCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
lowerCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
lowerCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list'''] )
self.assertFalse(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list'''] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
lowerCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
lowerCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list'''] )
# Check if the model link is synchronized.
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 338 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
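# A generic reproduction of the optional-dependency gate used above, shown as
# a sketch; the module name is a placeholder.
def _try_import(module_name):
    import importlib
    try:
        return importlib.import_module(module_name)
    except ImportError:
        # Mirrors the fallback above, where dummy objects stand in for the
        # real pipelines when torch/transformers are unavailable.
        return None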
| 253 | import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_3_3_7 , num_examples=4_2 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_3_3_7 , num_examples=4_2 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]:
lowerCAmelCase = split_dict._to_yaml_list()
assert len(snake_case__ ) == len(snake_case__ )
lowerCAmelCase = SplitDict._from_yaml_list(snake_case__ )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
lowerCAmelCase = None
# the split name of split_dict takes over the name of the split info object
lowerCAmelCase = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'''split_info''' , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name='''my_dataset''' )] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Optional[int]:
# For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
# field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
lowerCAmelCase = asdict(SplitDict({'''train''': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 338 | 0 |
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case_ (_a : Dict , _a : str , _a : Optional[Any] , _a : Union[str, Any]="attention" ):
UpperCAmelCase = params[F"{prefix}/layers_{i}/{layer_name}/key/kernel"]
UpperCAmelCase = params[F"{prefix}/layers_{i}/{layer_name}/out/kernel"]
UpperCAmelCase = params[F"{prefix}/layers_{i}/{layer_name}/query/kernel"]
UpperCAmelCase = params[F"{prefix}/layers_{i}/{layer_name}/value/kernel"]
return k, o, q, v
def snake_case_ (_a : int , _a : Dict , _a : Dict , _a : int=False ):
if split_mlp_wi:
UpperCAmelCase = params[F"{prefix}/layers_{i}/mlp/wi_0/kernel"]
UpperCAmelCase = params[F"{prefix}/layers_{i}/mlp/wi_1/kernel"]
UpperCAmelCase = (wi_a, wi_a)
else:
UpperCAmelCase = params[F"{prefix}/layers_{i}/mlp/wi/kernel"]
UpperCAmelCase = params[F"{prefix}/layers_{i}/mlp/wo/kernel"]
return wi, wo
def snake_case_ (_a : str , _a : str , _a : Optional[Any] , _a : Dict ):
return params[F"{prefix}/layers_{i}/{layer_name}/scale"]
def snake_case_ (_a : Optional[int] , *, _a : int , _a : Optional[int] ):
UpperCAmelCase = traverse_util.flatten_dict(variables['''target'''] )
UpperCAmelCase = {'''/'''.join(snake_case__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
UpperCAmelCase = '''encoder/layers_0/mlp/wi_0/kernel''' in old
print('''Split MLP:''' , snake_case__ )
UpperCAmelCase = collections.OrderedDict()
# Shared embeddings.
UpperCAmelCase = old['''token_embedder/embedding''']
# Encoder.
for i in range(snake_case__ ):
# Block i, layer 0 (Self Attention).
UpperCAmelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , '''encoder''' , '''pre_attention_layer_norm''' )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = tax_attention_lookup(snake_case__ , snake_case__ , '''encoder''' , '''attention''' )
UpperCAmelCase = layer_norm
UpperCAmelCase = k.T
UpperCAmelCase = o.T
UpperCAmelCase = q.T
UpperCAmelCase = v.T
# Block i, layer 1 (MLP).
UpperCAmelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , '''encoder''' , '''pre_mlp_layer_norm''' )
UpperCAmelCase , UpperCAmelCase = tax_mlp_lookup(snake_case__ , snake_case__ , '''encoder''' , snake_case__ )
UpperCAmelCase = layer_norm
if split_mlp_wi:
UpperCAmelCase = wi[0].T
UpperCAmelCase = wi[1].T
else:
UpperCAmelCase = wi.T
UpperCAmelCase = wo.T
UpperCAmelCase = old[
'''encoder/relpos_bias/rel_embedding'''
].T
UpperCAmelCase = old['''encoder/encoder_norm/scale''']
if not is_encoder_only:
# Decoder.
for i in range(snake_case__ ):
# Block i, layer 0 (Self Attention).
UpperCAmelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , '''decoder''' , '''pre_self_attention_layer_norm''' )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = tax_attention_lookup(snake_case__ , snake_case__ , '''decoder''' , '''self_attention''' )
UpperCAmelCase = layer_norm
UpperCAmelCase = k.T
UpperCAmelCase = o.T
UpperCAmelCase = q.T
UpperCAmelCase = v.T
# Block i, layer 1 (Cross Attention).
UpperCAmelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , '''decoder''' , '''pre_cross_attention_layer_norm''' )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = tax_attention_lookup(snake_case__ , snake_case__ , '''decoder''' , '''encoder_decoder_attention''' )
UpperCAmelCase = layer_norm
UpperCAmelCase = k.T
UpperCAmelCase = o.T
UpperCAmelCase = q.T
UpperCAmelCase = v.T
# Block i, layer 2 (MLP).
UpperCAmelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , '''decoder''' , '''pre_mlp_layer_norm''' )
UpperCAmelCase , UpperCAmelCase = tax_mlp_lookup(snake_case__ , snake_case__ , '''decoder''' , snake_case__ )
UpperCAmelCase = layer_norm
if split_mlp_wi:
UpperCAmelCase = wi[0].T
UpperCAmelCase = wi[1].T
else:
UpperCAmelCase = wi.T
UpperCAmelCase = wo.T
UpperCAmelCase = old['''decoder/decoder_norm/scale''']
UpperCAmelCase = old[
'''decoder/relpos_bias/rel_embedding'''
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
UpperCAmelCase = old['''decoder/logits_dense/kernel'''].T
return new
def snake_case_ (_a : Tuple , _a : Union[str, Any] ):
UpperCAmelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
UpperCAmelCase = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
UpperCAmelCase = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
UpperCAmelCase = state_dict['''shared.weight''']
return state_dict
def snake_case_ (_a : Union[str, Any] , _a : Optional[Any] , _a : int , _a : Any ):
UpperCAmelCase = checkpoints.load_tax_checkpoint(snake_case__ )
UpperCAmelCase = convert_tax_to_pytorch(snake_case__ , num_layers=config.num_layers , is_encoder_only=snake_case__ )
UpperCAmelCase = make_state_dict(snake_case__ , snake_case__ )
model.load_state_dict(snake_case__ , strict=snake_case__ )
def snake_case_ (_a : Tuple , _a : List[Any] , _a : Dict , _a : Dict = False ):
UpperCAmelCase = TaConfig.from_json_file(snake_case__ )
print(F"Building PyTorch model from configuration: {config}" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
UpperCAmelCase = TaEncoderModel(snake_case__ )
else:
UpperCAmelCase = TaForConditionalGeneration(snake_case__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(snake_case__ )
# Verify that we can load the checkpoint.
model.from_pretrained(snake_case__ )
print('''Done''' )
if __name__ == "__main__":
A =argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Set if the checkpoint is an encoder-only (no decoder) model', default=False
)
A =parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
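# Example invocation (the script filename and all paths are placeholders):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output_dir \
#       --is_encoder_only  # only for encoder-only checkpoints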
| 34 | import unittest
import numpy as np
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , ) -> np.ndarray:
lowerCAmelCase = np.shape(snake_case__ )
lowerCAmelCase = np.shape(snake_case__ )
lowerCAmelCase = np.shape(snake_case__ )
if shape_a[0] != shape_b[0]:
lowerCAmelCase = (
'''Expected the same number of rows for A and B. '''
f"Instead found A of size {shape_a} and B of size {shape_b}"
)
raise ValueError(snake_case__ )
if shape_b[1] != shape_c[1]:
lowerCAmelCase = (
'''Expected the same number of columns for B and C. '''
f"Instead found B of size {shape_b} and C of size {shape_c}"
)
raise ValueError(snake_case__ )
lowerCAmelCase = pseudo_inv
if a_inv is None:
try:
lowerCAmelCase = np.linalg.inv(snake_case__ )
except np.linalg.LinAlgError:
raise ValueError(
'''Input matrix A is not invertible. Cannot compute Schur complement.''' )
return mat_c - mat_b.T @ a_inv @ mat_b
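# Determinant identity that the tests below rely on: for the block matrix
#     M = [[A, B], [B^T, C]]
# with A invertible, det(M) = det(A) * det(C - B^T A^{-1} B), i.e.
# det(M) = det(A) * det(schur_complement(A, B, C)).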
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self ) ->None:
lowerCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowerCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
lowerCAmelCase = np.array([[2, 1], [6, 3]] )
lowerCAmelCase = schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.block([[a, b], [b.T, c]] )
lowerCAmelCase = np.linalg.det(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.linalg.det(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.linalg.det(__SCREAMING_SNAKE_CASE )
self.assertAlmostEqual(__SCREAMING_SNAKE_CASE , det_a * det_s )
def SCREAMING_SNAKE_CASE_ ( self ) ->None:
lowerCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowerCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
lowerCAmelCase = np.array([[2, 1], [6, 3]] )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->None:
lowerCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowerCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
lowerCAmelCase = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 338 | 0 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class lowerCamelCase_ ( datasets.BeamBasedBuilder ):
"""simple docstring"""
def _lowercase ( self : Optional[int] ) -> List[str]:
return datasets.DatasetInfo(
features=datasets.Features({'content': datasets.Value('string' )} ) , supervised_keys=__SCREAMING_SNAKE_CASE , )
def _lowercase ( self : int , _a : List[Any] , _a : List[str] ) -> Optional[int]:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_dummy_examples()} )]
def _lowercase ( self : str , _a : str , _a : Dict ) -> Dict:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__SCREAMING_SNAKE_CASE )
class lowerCamelCase_ ( datasets.BeamBasedBuilder ):
"""simple docstring"""
def _lowercase ( self : str ) -> List[Any]:
return datasets.DatasetInfo(
features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) , supervised_keys=__SCREAMING_SNAKE_CASE , )
def _lowercase ( self : str , _a : int , _a : str ) -> Optional[int]:
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_nested_examples()} )
]
def _lowercase ( self : Tuple , _a : str , _a : Tuple ) -> str:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__SCREAMING_SNAKE_CASE )
def a_ ( ) -> str:
return [(i, {"content": content}) for i, content in enumerate(['foo', 'bar', 'foobar'] )]
def a_ ( ) -> Tuple:
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['foo', 'bar', 'foobar'] )]
class lowerCamelCase_ ( UpperCamelCase_ ):
"""simple docstring"""
@require_beam
def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
__lowerCamelCase : Dict = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__lowerCamelCase : Optional[Any] = DummyBeamDataset(cache_dir=__SCREAMING_SNAKE_CASE , beam_runner='DirectRunner' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(__SCREAMING_SNAKE_CASE , builder.name , 'default' , '0.0.0' , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) )
__lowerCamelCase : List[Any] = builder.as_dataset()
self.assertEqual(dset['train'].num_rows , __SCREAMING_SNAKE_CASE )
self.assertEqual(dset['train'].info.splits['train'].num_examples , __SCREAMING_SNAKE_CASE )
self.assertDictEqual(dset['train'][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['train'][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
del dset
@require_beam
def _lowercase ( self : Dict ) -> Any:
import apache_beam as beam
__lowerCamelCase : Optional[int] = beam.io.parquetio.WriteToParquet
__lowerCamelCase : Union[str, Any] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__lowerCamelCase : str = DummyBeamDataset(cache_dir=__SCREAMING_SNAKE_CASE , beam_runner='DirectRunner' )
with patch('apache_beam.io.parquetio.WriteToParquet' ) as write_parquet_mock:
__lowerCamelCase : int = partial(__SCREAMING_SNAKE_CASE , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
__SCREAMING_SNAKE_CASE , builder.name , 'default' , '0.0.0' , f'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
__SCREAMING_SNAKE_CASE , builder.name , 'default' , '0.0.0' , f'{builder.name}-train-00001-of-00002.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) )
__lowerCamelCase : Optional[int] = builder.as_dataset()
self.assertEqual(dset['train'].num_rows , __SCREAMING_SNAKE_CASE )
self.assertEqual(dset['train'].info.splits['train'].num_examples , __SCREAMING_SNAKE_CASE )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['train']['content'] ) , sorted(['foo', 'bar', 'foobar'] ) )
self.assertTrue(
os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
del dset
@require_beam
def _lowercase ( self : Optional[Any] ) -> int:
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__lowerCamelCase : Dict = DummyBeamDataset(cache_dir=__SCREAMING_SNAKE_CASE )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def _lowercase ( self : int ) -> Tuple:
__lowerCamelCase : str = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__lowerCamelCase : Optional[Any] = NestedBeamDataset(cache_dir=__SCREAMING_SNAKE_CASE , beam_runner='DirectRunner' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(__SCREAMING_SNAKE_CASE , builder.name , 'default' , '0.0.0' , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) )
__lowerCamelCase : List[Any] = builder.as_dataset()
self.assertEqual(dset['train'].num_rows , __SCREAMING_SNAKE_CASE )
self.assertEqual(dset['train'].info.splits['train'].num_examples , __SCREAMING_SNAKE_CASE )
self.assertDictEqual(dset['train'][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset['train'][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
del dset
| 208 | import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
lowercase__ : Any = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> str:
lowerCAmelCase = ['''layers''', '''blocks''']
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
lowercase__ : List[Any] = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]:
lowerCAmelCase = list(s_dict.keys() )
for key in keys:
lowerCAmelCase = key
for k, v in WHISPER_MAPPING.items():
if k in key:
lowerCAmelCase = new_key.replace(snake_case__ , snake_case__ )
print(f"{key} -> {new_key}" )
lowerCAmelCase = s_dict.pop(snake_case__ )
return s_dict
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]:
lowerCAmelCase , lowerCAmelCase = emb.weight.shape
lowerCAmelCase = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ )
lowerCAmelCase = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> bytes:
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowerCAmelCase = os.path.basename(snake_case__ )
lowerCAmelCase = url.split('''/''' )[-2]
lowerCAmelCase = os.path.join(snake_case__ , snake_case__ )
if os.path.exists(snake_case__ ) and not os.path.isfile(snake_case__ ):
raise RuntimeError(f"{download_target} exists and is not a regular file" )
if os.path.isfile(snake_case__ ):
lowerCAmelCase = open(snake_case__ , '''rb''' ).read()
if hashlib.sha256(snake_case__ ).hexdigest() == expected_sha256:
return model_bytes
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" )
with urllib.request.urlopen(snake_case__ ) as source, open(snake_case__ , '''wb''' ) as output:
with tqdm(
total=int(source.info().get('''Content-Length''' ) ) , ncols=8_0 , unit='''iB''' , unit_scale=snake_case__ , unit_divisor=1_0_2_4 ) as loop:
while True:
lowerCAmelCase = source.read(8_1_9_2 )
if not buffer:
break
output.write(snake_case__ )
loop.update(len(snake_case__ ) )
lowerCAmelCase = open(snake_case__ , '''rb''' ).read()
if hashlib.sha256(snake_case__ ).hexdigest() != expected_sha256:
raise RuntimeError(
'''Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.''' )
return model_bytes
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> str:
if ".pt" not in checkpoint_path:
lowerCAmelCase = _download(_MODELS[checkpoint_path] )
else:
lowerCAmelCase = torch.load(snake_case__ , map_location='''cpu''' )
lowerCAmelCase = original_checkpoint['''dims''']
lowerCAmelCase = original_checkpoint['''model_state_dict''']
lowerCAmelCase = state_dict['''decoder.token_embedding.weight''']
remove_ignore_keys_(snake_case__ )
rename_keys(snake_case__ )
lowerCAmelCase = True
lowerCAmelCase = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]
lowerCAmelCase = WhisperConfig(
vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=snake_case__ , decoder_ffn_dim=snake_case__ , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_head'''] , max_source_positions=dimensions['''n_audio_ctx'''] , )
lowerCAmelCase = WhisperForConditionalGeneration(snake_case__ )
lowerCAmelCase , lowerCAmelCase = model.model.load_state_dict(snake_case__ , strict=snake_case__ )
if len(snake_case__ ) > 0 and not set(snake_case__ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
f" but all the following weights are missing {missing}" )
if tie_embeds:
lowerCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
lowerCAmelCase = proj_out_weights
model.save_pretrained(snake_case__ )
if __name__ == "__main__":
lowercase__ : List[str] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowercase__ : int = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 338 | 0 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowercase ( UpperCamelCase_ ):
lowerCamelCase : str = (DDPMScheduler,)
def UpperCAmelCase__ (self , **A ):
lowerCamelCase_ : List[Any] = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**__SCREAMING_SNAKE_CASE )
return config
def UpperCAmelCase__ (self ):
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ (self ):
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ (self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ (self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ (self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ (self ):
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , )
def UpperCAmelCase__ (self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ (self ):
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = self.scheduler_classes[0]
lowerCamelCase_ : Any = self.get_scheduler_config()
lowerCamelCase_ : int = scheduler_class(**__SCREAMING_SNAKE_CASE )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_09_79 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.scheduler_classes[0]
lowerCamelCase_ : List[Any] = self.get_scheduler_config()
lowerCamelCase_ : Tuple = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCamelCase_ : Any = len(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ : str = self.dummy_model()
lowerCamelCase_ : Dict = self.dummy_sample_deter
lowerCamelCase_ : Dict = torch.manual_seed(0 )
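        # Run the full reverse-diffusion loop with a fixed seed, then check summary statistics below.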
for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
lowerCamelCase_ : Tuple = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ : List[Any] = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ : str = pred_prev_sample
lowerCamelCase_ : Optional[Any] = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
lowerCamelCase_ : Tuple = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.33_72 ) < 1E-3
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.scheduler_classes[0]
lowerCamelCase_ : int = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCamelCase_ : List[Any] = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCamelCase_ : List[str] = len(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ : str = self.dummy_model()
lowerCamelCase_ : int = self.dummy_sample_deter
lowerCamelCase_ : Tuple = torch.manual_seed(0 )
for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
lowerCamelCase_ : Optional[int] = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ : str = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ : Optional[int] = pred_prev_sample
lowerCamelCase_ : List[str] = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
lowerCamelCase_ : List[str] = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1E-2
assert abs(result_mean.item() - 0.26_31 ) < 1E-3
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = self.scheduler_classes[0]
lowerCamelCase_ : Union[str, Any] = self.get_scheduler_config()
lowerCamelCase_ : List[Any] = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCamelCase_ : Tuple = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ : Tuple = scheduler.timesteps
for i, timestep in enumerate(__SCREAMING_SNAKE_CASE ):
if i == len(__SCREAMING_SNAKE_CASE ) - 1:
lowerCamelCase_ : Dict = -1
else:
lowerCamelCase_ : Tuple = timesteps[i + 1]
lowerCamelCase_ : Optional[Any] = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ : str = prev_t.item()
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = self.scheduler_classes[0]
lowerCamelCase_ : List[str] = self.get_scheduler_config()
lowerCamelCase_ : int = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCamelCase_ : Any = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = self.scheduler_classes[0]
lowerCamelCase_ : Union[str, Any] = self.get_scheduler_config()
lowerCamelCase_ : List[str] = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCamelCase_ : Optional[int] = [1_0_0, 8_7, 5_0, 1, 0]
lowerCamelCase_ : Union[str, Any] = len(__SCREAMING_SNAKE_CASE )
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.scheduler_classes[0]
lowerCamelCase_ : List[Any] = self.get_scheduler_config()
lowerCamelCase_ : Any = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCamelCase_ : Optional[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            __SCREAMING_SNAKE_CASE , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
| 318 | from ...processing_utils import ProcessorMixin
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = ["""image_processor""", """feature_extractor"""]
UpperCAmelCase_ : Optional[int] = """TvltImageProcessor"""
UpperCAmelCase_ : Optional[int] = """TvltFeatureExtractor"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Optional[int]:
super().__init__(image_processor=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = image_processor
lowerCAmelCase = feature_extractor
def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) ->List[Any]:
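        # Route images and audio to their dedicated processors and merge the outputs into a single dict.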
if images is None and audio is None:
raise ValueError('''You need to specify either an `images` or `audio` input to process.''' )
lowerCAmelCase = None
if images is not None:
lowerCAmelCase = self.image_processor(__SCREAMING_SNAKE_CASE , mask_pixel=__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if images_mixed is not None:
lowerCAmelCase = self.image_processor(__SCREAMING_SNAKE_CASE , is_mixed=__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if audio is not None:
lowerCAmelCase = self.feature_extractor(
__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , mask_audio=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowerCAmelCase = {}
if audio is not None:
output_dict.update(__SCREAMING_SNAKE_CASE )
if images is not None:
output_dict.update(__SCREAMING_SNAKE_CASE )
if images_mixed_dict is not None:
output_dict.update(__SCREAMING_SNAKE_CASE )
return output_dict
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
lowerCAmelCase = self.image_processor.model_input_names
lowerCAmelCase = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
| 338 | 0 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
lowerCamelCase : Any = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
lowerCamelCase : Dict = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
lowerCamelCase : Optional[int] = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
lowerCamelCase : List[Any] = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
lowerCamelCase : List[Any] = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
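# Apply the (tf_name, hf_name) substring rewrites to a single TensorFlow variable name.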
def snake_case_ ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] ):
for tf_name, hf_name in patterns:
__lowercase : str = k.replace(snake_case__ , snake_case__ )
return k
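# Instantiate the PyTorch model and copy every TensorFlow weight across, transposing dense kernels.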
def snake_case_ ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict ):
__lowercase : Union[str, Any] = BigBirdPegasusConfig(**snake_case__ )
__lowercase : str = BigBirdPegasusForConditionalGeneration(snake_case__ )
__lowercase : Tuple = torch_model.state_dict()
__lowercase : Tuple = {}
# separating decoder weights
__lowercase : Union[str, Any] = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )}
__lowercase : List[str] = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )}
for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion""" ):
__lowercase : Tuple = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE]
if any(snake_case__ ):
continue
__lowercase : Optional[int] = DECODER_PATTERNS
__lowercase : Dict = rename_state_dict_key(snake_case__ , snake_case__ )
if new_k not in state_dict:
raise ValueError(F"could not find new key {new_k} in state dict. (converted from {k})" )
if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ):
__lowercase : int = v.T
__lowercase : Tuple = torch.from_numpy(snake_case__ )
assert v.shape == state_dict[new_k].shape, F"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion""" ):
__lowercase : List[Any] = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE]
if any(snake_case__ ):
continue
__lowercase : str = REMAINING_PATTERNS
__lowercase : str = rename_state_dict_key(snake_case__ , snake_case__ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(F"could not find new key {new_k} in state dict. (converted from {k})" )
if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ):
__lowercase : List[str] = v.T
__lowercase : Any = torch.from_numpy(snake_case__ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, F"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
__lowercase : Optional[Any] = mapping["""model.embed_positions.weight"""]
__lowercase : List[str] = mapping.pop("""model.embed_positions.weight""" )
__lowercase , __lowercase : Optional[int] = torch_model.load_state_dict(snake_case__ , strict=snake_case__ )
__lowercase : Dict = [
k
for k in missing
if k
not in [
"""final_logits_bias""",
"""model.encoder.embed_tokens.weight""",
"""model.decoder.embed_tokens.weight""",
"""lm_head.weight""",
]
]
assert unexpected_missing == [], F"no matches found for the following torch keys {unexpected_missing}"
assert extra == [], F"no matches found for the following tf keys {extra}"
return torch_model
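# Read all checkpoint variables into a {name: numpy array} dict, skipping the global step variable.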
def snake_case_ ( lowerCAmelCase_ : Dict ):
__lowercase : Any = tf.train.list_variables(snake_case__ )
__lowercase : int = {}
__lowercase : int = ["""global_step"""]
for name, shape in tqdm(snake_case__ , desc="""converting tf checkpoint to dict""" ):
__lowercase : Any = any(pat in name for pat in ignore_name )
if skip_key:
continue
__lowercase : Any = tf.train.load_variable(snake_case__ , snake_case__ )
__lowercase : int = array
return tf_weights
def snake_case_ ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any ):
__lowercase : Optional[Any] = get_tf_weights_as_numpy(snake_case__ )
__lowercase : Union[str, Any] = convert_bigbird_pegasus(snake_case__ , snake_case__ )
torch_model.save_pretrained(snake_case__ )
if __name__ == "__main__":
lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowerCamelCase : Tuple = parser.parse_args()
lowerCamelCase : int = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) | 233 | def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> List[str]:
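    # In-place selection sort: swap the minimum of the unsorted suffix into position i.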
lowerCAmelCase = len(snake_case__ )
for i in range(length - 1 ):
lowerCAmelCase = i
for k in range(i + 1 , snake_case__ ):
if collection[k] < collection[least]:
lowerCAmelCase = k
if least != i:
            collection[least] , collection[i] = (collection[i], collection[least])
return collection
if __name__ == "__main__":
lowercase__ : Optional[int] = input('''Enter numbers separated by a comma:\n''').strip()
lowercase__ : str = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
| 338 | 0 |
'''simple docstring'''
from math import factorial
class a__:
def __init__( self : Optional[Any] , __snake_case : List[str] , __snake_case : Dict ):
a : str = real
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
a : Optional[Any] = [1] * rank
else:
a : Tuple = rank
def __repr__( self : str ):
return (
F"""{self.real}+"""
F"""{"+".join(str(__SCREAMING_SNAKE_CASE )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
)
def lowercase_ ( self : Optional[int] ):
a : Optional[int] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , __SCREAMING_SNAKE_CASE )
def __add__( self : str , __snake_case : Union[str, Any] ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return Dual(self.real + other , self.duals )
a : Any = self.duals.copy()
a : int = other.duals.copy()
if len(__SCREAMING_SNAKE_CASE ) > len(__SCREAMING_SNAKE_CASE ):
o_dual.extend([1] * (len(__SCREAMING_SNAKE_CASE ) - len(__SCREAMING_SNAKE_CASE )) )
elif len(__SCREAMING_SNAKE_CASE ) < len(__SCREAMING_SNAKE_CASE ):
s_dual.extend([1] * (len(__SCREAMING_SNAKE_CASE ) - len(__SCREAMING_SNAKE_CASE )) )
a : Optional[Any] = []
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , __SCREAMING_SNAKE_CASE )
lowercase__ = __add__
def __sub__( self : int , __snake_case : Dict ):
return self + other * -1
def __mul__( self : Any , __snake_case : List[str] ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
a : Optional[int] = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , __SCREAMING_SNAKE_CASE )
a : Optional[Any] = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , __SCREAMING_SNAKE_CASE )
lowercase__ = __mul__
def __truediv__( self : int , __snake_case : Tuple ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
a : Tuple = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , __SCREAMING_SNAKE_CASE )
raise ValueError
def __floordiv__( self : int , __snake_case : Any ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
a : Optional[Any] = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , __SCREAMING_SNAKE_CASE )
raise ValueError
def __pow__( self : int , __snake_case : List[str] ):
if n < 0 or isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise ValueError('power must be a positive integer' )
if n == 0:
return 1
if n == 1:
return self
a : int = self
for _ in range(n - 1 ):
x *= self
return x
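# Forward-mode autodiff with dual numbers: the n-th derivative is duals[n - 1] * n!.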
def lowerCamelCase__ ( _A , _A , _A ):
if not callable(snake_case__ ):
raise ValueError('differentiate() requires a function as input for func' )
if not isinstance(snake_case__ , (float, int) ):
raise ValueError('differentiate() requires a float as input for position' )
if not isinstance(snake_case__ , snake_case__ ):
raise ValueError('differentiate() requires an int as input for order' )
a : Union[str, Any] = Dual(snake_case__ , 1 )
a : List[str] = func(snake_case__ )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
def lowerCamelCase__ ( _A ):
return y**2 * y**4
print(differentiate(f, 9, 2)) | 297 | import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class lowercase_ :
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=19 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.0_2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , ) ->Union[str, Any]:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__SCREAMING_SNAKE_CASE , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , )
return config
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Tuple:
lowerCAmelCase = EsmForProteinFolding(config=__SCREAMING_SNAKE_CASE ).float()
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
lowerCAmelCase = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) ,
) = config_and_inputs
lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowercase_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : Dict = (EsmForProteinFolding,) if is_torch_available() else ()
UpperCAmelCase_ : List[Any] = ()
UpperCAmelCase_ : Tuple = {} if is_torch_available() else {}
UpperCAmelCase_ : List[str] = False
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
lowerCAmelCase = EsmFoldModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
@unittest.skip('''Does not support attention outputs''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
pass
@unittest.skip
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
pass
@unittest.skip('''ESMFold only has one output format.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
pass
@require_torch
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
lowerCAmelCase = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float()
model.eval()
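        # Fold a short sequence of token ids and spot-check the first predicted atom position.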
lowerCAmelCase = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )['''positions''']
lowerCAmelCase = torch.tensor([2.5_8_2_8, 0.7_9_9_3, -1_0.9_3_3_4] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 338 | 0 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class snake_case_ :
def __init__( self :int ,__snake_case :int ,__snake_case :Tuple=13 ,__snake_case :Optional[int]=7 ,__snake_case :Tuple=True ,__snake_case :List[Any]=True ,__snake_case :int=False ,__snake_case :str=True ,__snake_case :Optional[int]=99 ,__snake_case :int=32 ,__snake_case :List[Any]=5 ,__snake_case :Union[str, Any]=4 ,__snake_case :Tuple=37 ,__snake_case :List[str]="gelu" ,__snake_case :List[Any]=0.1 ,__snake_case :Union[str, Any]=0.1 ,__snake_case :Optional[Any]=5_12 ,__snake_case :str=16 ,__snake_case :str=2 ,__snake_case :List[Any]=0.02 ,__snake_case :Optional[int]=3 ,__snake_case :str=4 ,__snake_case :List[str]=None ,) -> Tuple:
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_input_mask
a__ = use_token_type_ids
a__ = use_labels
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = scope
def lowerCamelCase__( self :str ) -> Optional[int]:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
a__ = ids_tensor([self.batch_size] ,self.num_choices )
a__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__( self :Union[str, Any] ) -> Optional[Any]:
return OpenLlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__SCREAMING_SNAKE_CASE ,initializer_range=self.initializer_range ,use_stable_embedding=__SCREAMING_SNAKE_CASE ,)
def lowerCamelCase__( self :str ,__snake_case :List[Any] ,__snake_case :Union[str, Any] ,__snake_case :Optional[Any] ,__snake_case :Tuple ,__snake_case :Optional[Any] ,__snake_case :Dict ,__snake_case :List[str] ) -> List[Any]:
a__ = OpenLlamaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE )
a__ = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__( self :List[Any] ,__snake_case :Dict ,__snake_case :Optional[Any] ,__snake_case :Any ,__snake_case :int ,__snake_case :List[str] ,__snake_case :List[Any] ,__snake_case :Optional[int] ,__snake_case :str ,__snake_case :Any ,) -> List[Any]:
a__ = True
a__ = OpenLlamaModel(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(
__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,encoder_hidden_states=__SCREAMING_SNAKE_CASE ,encoder_attention_mask=__SCREAMING_SNAKE_CASE ,)
a__ = model(
__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,encoder_hidden_states=__SCREAMING_SNAKE_CASE ,)
a__ = model(__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__( self :int ,__snake_case :int ,__snake_case :Dict ,__snake_case :List[str] ,__snake_case :Tuple ,__snake_case :Optional[int] ,__snake_case :Dict ,__snake_case :List[Any] ,__snake_case :int ,__snake_case :str ,) -> Union[str, Any]:
a__ = OpenLlamaForCausalLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__( self :int ,__snake_case :Any ,__snake_case :Union[str, Any] ,__snake_case :Dict ,__snake_case :Optional[int] ,__snake_case :Optional[Any] ,__snake_case :Optional[Any] ,__snake_case :Tuple ,__snake_case :int ,__snake_case :int ,) -> Optional[int]:
a__ = True
a__ = True
a__ = OpenLlamaForCausalLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
# first forward pass
a__ = model(
__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,encoder_hidden_states=__SCREAMING_SNAKE_CASE ,encoder_attention_mask=__SCREAMING_SNAKE_CASE ,use_cache=__SCREAMING_SNAKE_CASE ,)
a__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a__ = ids_tensor((self.batch_size, 3) ,config.vocab_size )
a__ = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
a__ = torch.cat([input_ids, next_tokens] ,dim=-1 )
a__ = torch.cat([input_mask, next_mask] ,dim=-1 )
a__ = model(
__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,encoder_hidden_states=__SCREAMING_SNAKE_CASE ,encoder_attention_mask=__SCREAMING_SNAKE_CASE ,output_hidden_states=__SCREAMING_SNAKE_CASE ,)['hidden_states'][0]
a__ = model(
__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,encoder_hidden_states=__SCREAMING_SNAKE_CASE ,encoder_attention_mask=__SCREAMING_SNAKE_CASE ,past_key_values=__SCREAMING_SNAKE_CASE ,output_hidden_states=__SCREAMING_SNAKE_CASE ,)['hidden_states'][0]
# select random slice
a__ = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
a__ = output_from_no_past[:, -3:, random_slice_idx].detach()
a__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,atol=1E-3 ) )
def lowerCamelCase__( self :Tuple ) -> Dict:
a__ = self.prepare_config_and_inputs()
(
(
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) ,
) = config_and_inputs
a__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class snake_case_ (UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
UpperCAmelCase__ : Union[str, Any] = (OpenLlamaForCausalLM,) if is_torch_available() else ()
UpperCAmelCase__ : Any = (
{
"""feature-extraction""": OpenLlamaModel,
"""text-classification""": OpenLlamaForSequenceClassification,
"""text-generation""": OpenLlamaForCausalLM,
"""zero-shot""": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : List[str] = False
def lowerCamelCase__( self :int ) -> int:
a__ = OpenLlamaModelTester(self )
a__ = ConfigTester(self ,config_class=__SCREAMING_SNAKE_CASE ,hidden_size=37 )
def lowerCamelCase__( self :Optional[int] ) -> int:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :List[Any] ) -> List[Any]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__( self :List[str] ) -> Dict:
a__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a__ = type
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def lowerCamelCase__( self :Dict ) -> str:
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = 3
a__ = input_dict['input_ids']
a__ = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
a__ = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
a__ = OpenLlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase__( self :Any ) -> str:
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = 3
a__ = 'single_label_classification'
a__ = input_dict['input_ids']
a__ = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
a__ = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
a__ = OpenLlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase__( self :Optional[Any] ) -> List[Any]:
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = 3
a__ = 'multi_label_classification'
a__ = input_dict['input_ids']
a__ = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
a__ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
a__ = OpenLlamaForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def lowerCamelCase__( self :int ) -> int:
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def lowerCamelCase__( self :int ,__snake_case :List[Any] ) -> Dict:
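        # RoPE scaling sanity check: dynamic scaling must leave short inputs unchanged, linear must not.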
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = ids_tensor([1, 10] ,config.vocab_size )
a__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
a__ = OpenLlamaModel(__SCREAMING_SNAKE_CASE )
original_model.to(__SCREAMING_SNAKE_CASE )
original_model.eval()
a__ = original_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
a__ = original_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
a__ = {'type': scaling_type, 'factor': 10.0}
a__ = OpenLlamaModel(__SCREAMING_SNAKE_CASE )
scaled_model.to(__SCREAMING_SNAKE_CASE )
scaled_model.eval()
a__ = scaled_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
a__ = scaled_model(__SCREAMING_SNAKE_CASE ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,atol=1E-5 ) )
| 240 | import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = ["""image_processor""", """tokenizer"""]
UpperCAmelCase_ : int = """OwlViTImageProcessor"""
UpperCAmelCase_ : Any = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->Any:
lowerCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __SCREAMING_SNAKE_CASE , )
lowerCAmelCase = kwargs.pop('''feature_extractor''' )
lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="max_length" , __SCREAMING_SNAKE_CASE="np" , **__SCREAMING_SNAKE_CASE ) ->int:
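        # At least one of text, query_images, or images is required; text queries are padded to the same
        # number of queries across the batch.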
if text is None and query_images is None and images is None:
raise ValueError(
'''You have to specify at least one text or query image or image. All three cannot be none.''' )
if text is not None:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or (isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not isinstance(text[0] , __SCREAMING_SNAKE_CASE )):
lowerCAmelCase = [self.tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )]
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(text[0] , __SCREAMING_SNAKE_CASE ):
lowerCAmelCase = []
# Maximum number of queries across batch
lowerCAmelCase = max([len(__SCREAMING_SNAKE_CASE ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__SCREAMING_SNAKE_CASE ) != max_num_queries:
lowerCAmelCase = t + [''' '''] * (max_num_queries - len(__SCREAMING_SNAKE_CASE ))
lowerCAmelCase = self.tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
encodings.append(__SCREAMING_SNAKE_CASE )
else:
raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' )
if return_tensors == "np":
lowerCAmelCase = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
lowerCAmelCase = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
lowerCAmelCase = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
lowerCAmelCase = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
lowerCAmelCase = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 )
lowerCAmelCase = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
lowerCAmelCase = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
lowerCAmelCase = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
else:
raise ValueError('''Target return tensor type could not be returned''' )
lowerCAmelCase = BatchEncoding()
lowerCAmelCase = input_ids
lowerCAmelCase = attention_mask
if query_images is not None:
lowerCAmelCase = BatchEncoding()
lowerCAmelCase = self.image_processor(
__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).pixel_values
lowerCAmelCase = query_pixel_values
if images is not None:
lowerCAmelCase = self.image_processor(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if text is not None and images is not None:
lowerCAmelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
lowerCAmelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__SCREAMING_SNAKE_CASE ) , tensor_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Optional[int]:
return self.image_processor.post_process(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Any:
return self.image_processor.post_process_object_detection(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Tuple:
return self.image_processor.post_process_image_guided_detection(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->str:
return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __SCREAMING_SNAKE_CASE , )
return self.image_processor
| 338 | 0 |
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> bool:
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = F"""Input value of [number={number}] must be an integer"""
raise TypeError(snake_case__ )
if number < 0:
return False
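    # Automorphic check: the square must end with the number's own digits, compared right to left.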
SCREAMING_SNAKE_CASE = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 296 | import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {'''vocab_file''': '''spiece.model'''}
lowercase__ : Optional[int] = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
lowercase__ : Any = {
'''albert-base-v1''': 5_1_2,
'''albert-large-v1''': 5_1_2,
'''albert-xlarge-v1''': 5_1_2,
'''albert-xxlarge-v1''': 5_1_2,
'''albert-base-v2''': 5_1_2,
'''albert-large-v2''': 5_1_2,
'''albert-xlarge-v2''': 5_1_2,
'''albert-xxlarge-v2''': 5_1_2,
}
lowercase__ : Tuple = '''▁'''
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = VOCAB_FILES_NAMES
UpperCAmelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) ->None:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
lowerCAmelCase = (
AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE , normalized=__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else mask_token
)
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = do_lower_case
lowerCAmelCase = remove_space
lowerCAmelCase = keep_accents
lowerCAmelCase = vocab_file
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
return len(self.sp_model )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
lowerCAmelCase = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) ->int:
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ) ->Tuple:
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Any:
if self.remove_space:
lowerCAmelCase = ''' '''.join(inputs.strip().split() )
else:
lowerCAmelCase = inputs
lowerCAmelCase = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
lowerCAmelCase = unicodedata.normalize('''NFKD''' , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = ''''''.join([c for c in outputs if not unicodedata.combining(__SCREAMING_SNAKE_CASE )] )
if self.do_lower_case:
lowerCAmelCase = outputs.lower()
return outputs
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->List[str]:
lowerCAmelCase = self.preprocess_text(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = []
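        # Split pieces like '9,' so the trailing comma and the digits are tokenized separately.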
for piece in pieces:
if len(__SCREAMING_SNAKE_CASE ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(__SCREAMING_SNAKE_CASE , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCAmelCase = cur_pieces[1:]
else:
lowerCAmelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__SCREAMING_SNAKE_CASE )
else:
new_pieces.append(__SCREAMING_SNAKE_CASE )
return new_pieces
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->int:
return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->int:
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Optional[int]:
lowerCAmelCase = []
lowerCAmelCase = ''''''
lowerCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
lowerCAmelCase = True
lowerCAmelCase = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = False
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ) ->List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is not None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->Tuple[str]:
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCAmelCase = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 338 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = "upernet"

    def __init__( self , backbone_config=None , hidden_size=512 , initializer_range=0.02 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=384 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config , dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict( self ):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
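# Usage sketch for the config above (kept commented since constructing it pulls
# in the default ResNet backbone config from transformers):
# config = UperNetConfig(hidden_size=256 , pool_scales=[1, 2, 3, 6] )
# serialized = config.to_dict()
# assert serialized["model_type"] == "upernet" and "backbone_config" in serialized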
| 13 | import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest( SchedulerCommonTest ):
    """simple docstring"""

    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->str:
lowerCAmelCase = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
}
config.update(**__SCREAMING_SNAKE_CASE )
return config
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ) ->Tuple:
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase , lowerCAmelCase = sample, sample
for t in range(__SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ):
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
lowerCAmelCase = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
pass
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
lowerCAmelCase = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
if scheduler is None:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = 10
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
return sample
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
lowerCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
lowerCAmelCase = scheduler.timesteps[5]
lowerCAmelCase = scheduler.timesteps[6]
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
lowerCAmelCase = DEISMultistepScheduler(**self.get_scheduler_config() )
lowerCAmelCase = self.full_loop(scheduler=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase = self.full_loop(scheduler=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , algorithm_type='''deis''' , solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = self.full_loop(
solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , )
assert not torch.isnan(__SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers"
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE )
self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=__SCREAMING_SNAKE_CASE , time_step=0 )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
lowerCAmelCase = self.full_loop()
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
lowerCAmelCase = self.full_loop(prediction_type='''v_prediction''' )
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(thresholding=__SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 )
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = 10
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
        assert sample.dtype == torch.float16
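# Sketch of the denoising loop these tests exercise, using the public diffusers
# scheduler API; the zero "residual" below stands in for a real UNet prediction.
# scheduler = DEISMultistepScheduler(num_train_timesteps=1000 )
# scheduler.set_timesteps(10 )
# sample = torch.randn(1 , 3 , 8 , 8 )
# for t in scheduler.timesteps:
#     residual = torch.zeros_like(sample )
#     sample = scheduler.step(residual , t , sample ).prev_sample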
| 338 | 0 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
    'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
    'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class OwlViTTextConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = "owlvit_text_model"

    def __init__(self , vocab_size=49408 , hidden_size=512 , intermediate_size=2048 , num_hidden_layers=12 , num_attention_heads=8 , max_position_embeddings=16 , hidden_act="quick_gelu" , layer_norm_eps=1E-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , pad_token_id=0 , bos_token_id=49406 , eos_token_id=49407 , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type" ) == "owlvit":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict , **kwargs )
class OwlViTVisionConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = "owlvit_vision_model"

    def __init__(self , hidden_size=768 , intermediate_size=3072 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=768 , patch_size=32 , hidden_act="quick_gelu" , layer_norm_eps=1E-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type" ) == "owlvit":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict , **kwargs )
class OwlViTConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = "owlvit"
    is_composition = True

    def __init__(self , text_config=None , vision_config=None , projection_dim=512 , logit_scale_init_value=2.6592 , return_dict=True , **kwargs , ) -> None:
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the OwlViTVisionConfig with default values." )
        self.text_config = OwlViTTextConfig(**text_config )
        self.vision_config = OwlViTVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict , **kwargs )

    @classmethod
    def from_text_vision_configs(cls , text_config , vision_config , **kwargs ):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict , **kwargs )

    def to_dict(self ):
        output = copy.deepcopy(self.__dict__ )
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig( OnnxConfig ):
    '''simple docstring'''

    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ] )

    @property
    def outputs(self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ] )

    @property
    def atol_for_validation(self ) -> float:
        return 1E-4

    def generate_dummy_inputs(self , processor , batch_size = -1 , seq_length = -1 , framework = None , ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer , batch_size=batch_size , seq_length=seq_length , framework=framework )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor , batch_size=batch_size , framework=framework )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self ) -> int:
        return 14
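# Usage sketch: composing the joint config from its two halves via the
# `from_text_vision_configs` helper above (values illustrative, kept commented
# since it needs a full transformers install):
# text_cfg = OwlViTTextConfig(vocab_size=49408 ).to_dict()
# vision_cfg = OwlViTVisionConfig(image_size=768 , patch_size=32 ).to_dict()
# full_cfg = OwlViTConfig.from_text_vision_configs(text_cfg , vision_cfg )
# assert full_cfg.projection_dim == 512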
| 298 | import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device

enable_full_determinism()


class KarrasVePipelineFastTests( unittest.TestCase ):
    """simple docstring"""

    @property
    def dummy_uncond_unet( self ):
        torch.manual_seed(0 )
        model = UNet2DModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model

    def test_inference( self ):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=2 , generator=generator , output_type='numpy' ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = pipe(num_inference_steps=2 , generator=generator , output_type='numpy' , return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests( unittest.TestCase ):
    """simple docstring"""

    def test_full_inference( self ):
        model_id = 'google/ncsnpp-celebahq-256'
        model = UNet2DModel.from_pretrained(model_id )
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model , scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=20 , generator=generator , output_type='numpy' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
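# Inference sketch mirroring the slow test above; the checkpoint name comes
# from the test itself, and running this downloads weights (kept commented):
# pipe = KarrasVePipeline(
#     unet=UNet2DModel.from_pretrained('google/ncsnpp-celebahq-256' ) , scheduler=KarrasVeScheduler() )
# image = pipe(num_inference_steps=20 , generator=torch.manual_seed(0 ) , output_type='numpy' ).images[0]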
| 338 | 0 |
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class BankersAlgorithm:
    '''simple docstring'''

    def __init__( self, claim_vector, allocated_resources_table, maximum_claim_table, ) -> None:
        """simple docstring"""
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation( self ) -> list[int]:
        """simple docstring"""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table )
            for i in range(len(self.__allocated_resources_table[0] ) )
        ]

    def __available_resources( self ) -> list[int]:
        """simple docstring"""
        return np.array(self.__claim_vector ) - np.array(
            self.__processes_resource_summation() )

    def __need( self ) -> list[list[int]]:
        """simple docstring"""
        return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
            for i, allocated_resource in enumerate(self.__allocated_resources_table )
        ]

    def __need_index_manager( self ) -> dict[int, list[int]]:
        """simple docstring"""
        return {self.__need().index(i ): i for i in self.__need()}

    def main( self, **kwargs ) -> None:
        """simple docstring"""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('_' * 50 + '\n' )
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need ):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing." )
                    # remove the process run from stack
                    need_list.remove(each_need )
                    # update available/freed resources stack
                    available_resources = np.array(available_resources ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        'Updated available resource stack for processes: '
                        + ' '.join([str(x ) for x in available_resources] ) )
                    break
            if safe:
                print('The process is in a safe state.\n' )
            else:
                print('System in unsafe state. Aborting...\n' )
                break

    def __pretty_data( self ) -> None:
        """simple docstring"""
        print(' ' * 9 + 'Allocated Resource Table' )
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item ) + 1}"
                + ' '.join(f"{it:>8}" for it in item )
                + '\n' )
        print(' ' * 9 + 'System Resource Table' )
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item ) + 1}"
                + ' '.join(f"{it:>8}" for it in item )
                + '\n' )
        print(
            'Current Usage by Active Processes: '
            + ' '.join(str(x ) for x in self.__claim_vector ) )
        print(
            'Initial Available Resources: '
            + ' '.join(str(x ) for x in self.__available_resources() ) )
        time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
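# Example run (a sketch) on the test tables defined at the top of this snippet;
# passing describe=True exercises the pretty-printing path of `main`.
# BankersAlgorithm(
#     test_claim_vector , test_allocated_res_table , test_maximum_claim_table
# ).main(describe=True )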
| 201 | from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
UpperCamelCase_ , r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->np.ndarray:
if self.framework == "tf":
lowerCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
lowerCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->np.ndarray:
lowerCAmelCase = self.get_masked_index(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"No mask_token ({self.tokenizer.mask_token}) found on the input" , )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->str:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->Dict[str, GenericTensor]:
if return_tensors is None:
lowerCAmelCase = self.framework
lowerCAmelCase = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE )
self.ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE )
return model_inputs
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Tuple:
lowerCAmelCase = self.model(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = model_inputs['''input_ids''']
return model_outputs
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=None ) ->str:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
lowerCAmelCase = target_ids.shape[0]
lowerCAmelCase = model_outputs['''input_ids'''][0]
lowerCAmelCase = model_outputs['''logits''']
if self.framework == "tf":
lowerCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
lowerCAmelCase = outputs.numpy()
lowerCAmelCase = outputs[0, masked_index, :]
lowerCAmelCase = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
if target_ids is not None:
lowerCAmelCase = tf.gather_nd(tf.squeeze(__SCREAMING_SNAKE_CASE , 0 ) , target_ids.reshape(-1 , 1 ) )
lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE , 0 )
lowerCAmelCase = tf.math.top_k(__SCREAMING_SNAKE_CASE , k=__SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase = topk.values.numpy(), topk.indices.numpy()
else:
lowerCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
lowerCAmelCase = outputs[0, masked_index, :]
lowerCAmelCase = logits.softmax(dim=-1 )
if target_ids is not None:
lowerCAmelCase = probs[..., target_ids]
lowerCAmelCase , lowerCAmelCase = probs.topk(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = []
lowerCAmelCase = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
lowerCAmelCase = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
lowerCAmelCase = input_ids.numpy().copy()
if target_ids is not None:
lowerCAmelCase = target_ids[p].tolist()
lowerCAmelCase = p
# Filter padding out:
lowerCAmelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
lowerCAmelCase = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(__SCREAMING_SNAKE_CASE )
result.append(__SCREAMING_SNAKE_CASE )
if single_mask:
return result[0]
return result
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Optional[Any]:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowerCAmelCase = [targets]
try:
lowerCAmelCase = self.tokenizer.get_vocab()
except Exception:
lowerCAmelCase = {}
lowerCAmelCase = []
for target in targets:
lowerCAmelCase = vocab.get(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if id_ is None:
lowerCAmelCase = self.tokenizer(
__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , max_length=1 , truncation=__SCREAMING_SNAKE_CASE , )['''input_ids''']
if len(__SCREAMING_SNAKE_CASE ) == 0:
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
lowerCAmelCase = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
F"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." )
target_ids.append(id_ )
lowerCAmelCase = list(set(__SCREAMING_SNAKE_CASE ) )
if len(__SCREAMING_SNAKE_CASE ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
return target_ids
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ) ->Dict:
lowerCAmelCase = {}
if targets is not None:
lowerCAmelCase = self.get_target_ids(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = target_ids
if top_k is not None:
lowerCAmelCase = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
lowerCAmelCase = super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) == 1:
return outputs[0]
return outputs
| 338 | 0 |
import math
import sys

import cv2
import numpy as np


def vec_gaussian( img: np.ndarray, variance: float ) -> np.ndarray:
    # Apply the gaussian function element-wise.
    sigma = math.sqrt(variance )
    cons = 1 / (sigma * math.sqrt(2 * math.pi ))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5 )


def get_slice( img: np.ndarray, x: int, y: int, kernel_size: int ) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel( kernel_size: int, spatial_variance: float ) -> np.ndarray:
    # Create a gaussian kernel of the given dimension.
    arr = np.zeros((kernel_size, kernel_size) )
    for i in range(0 , kernel_size ):
        for j in range(0 , kernel_size ):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
    return vec_gaussian(arr , spatial_variance )


def bilateral_filter( img: np.ndarray, spatial_variance: float, intensity_variance: float, kernel_size: int, ) -> np.ndarray:
    imga = np.zeros(img.shape )
    gauss_ker = get_gauss_kernel(kernel_size , spatial_variance )
    size_x , size_y = img.shape
    for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
        for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
            img_s = get_slice(img , i , j , kernel_size )
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i , intensity_variance )
            weights = np.multiply(gauss_ker , img_ig )
            vals = np.multiply(img_s , weights )
            val = np.sum(vals ) / np.sum(weights )
            imga[i, j] = val
    return imga


def parse_args( args: list ) -> tuple:
    filename = args[1] if args[1:] else '../image_data/lena.jpg'
    spatial_variance = float(args[2] ) if args[2:] else 1.0
    intensity_variance = float(args[3] ) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4] )
        kernel_size = kernel_size + abs(kernel_size % 2 - 1 )
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow('input image', img)
    out = img / 255
    out = out.astype('float32')
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow('output image', out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
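# Synthetic usage sketch that avoids file I/O and OpenCV windows: smooth a
# random grayscale image (floats in [0, 1]) with the functions above.
_rng = np.random.default_rng(0 )
_noisy = _rng.random((32, 32) ).astype('float32' )
_smoothed = bilateral_filter(_noisy , 1.0 , 1.0 , 5 )
assert _smoothed.shape == _noisy.shape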
| 253 | from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 338 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp( self ):
        super().setUp()
        vocab_tokens = [
            '<unk>',
            '<cls>',
            '<sep>',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )

    def get_tokenizer( self , **kwargs ):
        return FunnelTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer( self , **kwargs ):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts( self , tokenizer ):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text

    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )

    def test_token_type_ids( self ):
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            inputs = tokenizer('UNwant\u00E9d,running' )
            sentence_len = len(inputs['input_ids'] ) - 1
            self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len )
            inputs = tokenizer('UNwant\u00E9d,running' , 'UNwant\u00E9d,running' )
            self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len + [1] * sentence_len )
| 34 | LETTERS = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''


def main() -> None:
    message = input('Enter message: ' )
    key = input('Enter key [alphanumeric]: ' )
    mode = input('Encrypt/Decrypt [e/d]: ' )
    if mode.lower().startswith('e' ):
        mode = 'encrypt'
        translated = encrypt_message(key , message )
    elif mode.lower().startswith('d' ):
        mode = 'decrypt'
        translated = decrypt_message(key , message )
    print(f"\n{mode.title()}ed message:" )
    print(translated )


def encrypt_message( key: str, message: str ) -> str:
    return translate_message(key , message , 'encrypt' )


def decrypt_message( key: str, message: str ) -> str:
    return translate_message(key , message , 'decrypt' )


def translate_message( key: str, message: str, mode: str ) -> str:
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper() )
        if num != -1:
            if mode == 'encrypt':
                num += LETTERS.find(key[key_index] )
            elif mode == 'decrypt':
                num -= LETTERS.find(key[key_index] )
            num %= len(LETTERS )
            if symbol.isupper():
                translated.append(LETTERS[num] )
            elif symbol.islower():
                translated.append(LETTERS[num].lower() )
            key_index += 1
            if key_index == len(key ):
                key_index = 0
        else:
            translated.append(symbol )
    return ''.join(translated )
if __name__ == "__main__":
main()
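# Worked example: with key "KEY" the plaintext "HELLO" shifts H by K(10), E by
# E(4), L by Y(24), L by K(10), O by E(4), giving "RIJVS"; decryption inverts it.
assert encrypt_message('KEY' , 'HELLO' ) == 'RIJVS'
assert decrypt_message('KEY' , 'RIJVS' ) == 'HELLO'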
| 338 | 0 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'tokenizer_config_file': 'tokenizer_config.json',
    'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
        ),
    },
    'tokenizer_config_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
        ),
    },
    'merges_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
        ),
    },
}

BPE_TOKEN_MERGES = '</w>'
BPE_TOKEN_VOCAB = '@@ '


def get_pairs( word ) -> set:
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs


# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/s2t-wav2vec2-large-en-de': 1024}


class Speech2Text2Tokenizer( PreTrainedTokenizer ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__( self , vocab_file , bos_token="<s>" , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , do_lower_case=False , merges_file=None , **kwargs , ) -> None:
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , do_lower_case=do_lower_case , **kwargs , )
        self.do_lower_case = do_lower_case
        with open(vocab_file , encoding='utf-8' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(f'No merges files provided. {self.__class__.__name__} can only be used for decoding.' )
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file , encoding='utf-8' ) as merges_handle:
                merges = merges_handle.read().split('\n' )[:-1]
            merges = [tuple(merge.split()[:2] ) for merge in merges]
            self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
            self.cache = {}
@property
def _lowercase ( self : Optional[Any] ) -> int:
return len(self.decoder )
def _lowercase ( self : Any ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def _lowercase ( self : List[Any] , _a : Optional[int] ) -> Optional[Any]:
__lowerCamelCase : Union[str, Any] = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
__lowerCamelCase : List[str] = get_pairs(__SCREAMING_SNAKE_CASE )
if not pairs:
return token
while True:
__lowerCamelCase : Tuple = min(__SCREAMING_SNAKE_CASE , key=lambda _a : self.bpe_ranks.get(__SCREAMING_SNAKE_CASE , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
__lowerCamelCase ,__lowerCamelCase : str = bigram
__lowerCamelCase : Any = []
__lowerCamelCase : Dict = 0
while i < len(__SCREAMING_SNAKE_CASE ):
try:
__lowerCamelCase : Optional[int] = word.index(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowerCamelCase : Dict = j
if word[i] == first and i < len(__SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowerCamelCase : Union[str, Any] = tuple(__SCREAMING_SNAKE_CASE )
__lowerCamelCase : Union[str, Any] = new_word
if len(__SCREAMING_SNAKE_CASE ) == 1:
break
else:
__lowerCamelCase : str = get_pairs(__SCREAMING_SNAKE_CASE )
__lowerCamelCase : List[Any] = ' '.join(__SCREAMING_SNAKE_CASE )
if word == "\n " + BPE_TOKEN_MERGES:
__lowerCamelCase : Dict = '\n' + BPE_TOKEN_MERGES
if word.endswith(__SCREAMING_SNAKE_CASE ):
__lowerCamelCase : Dict = word.replace(__SCREAMING_SNAKE_CASE , '' )
__lowerCamelCase : Union[str, Any] = word.replace(' ' , __SCREAMING_SNAKE_CASE )
__lowerCamelCase : List[str] = word
return word
def _lowercase ( self : List[str] , _a : Union[str, Any] ) -> Dict:
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
' that it can only be used for decoding, not for encoding.'
'Make sure to provide `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
__lowerCamelCase : str = text.lower()
__lowerCamelCase : int = text.split()
__lowerCamelCase : Any = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(__SCREAMING_SNAKE_CASE ).split(' ' ) ) )
return split_tokens
def _lowercase ( self : Optional[int] , _a : Union[str, Any] ) -> int:
return self.encoder.get(__SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def _lowercase ( self : List[str] , _a : Any ) -> str:
__lowerCamelCase : Any = self.decoder.get(__SCREAMING_SNAKE_CASE , self.unk_token )
return result
def _lowercase ( self : Tuple , _a : str ) -> str:
__lowerCamelCase : Union[str, Any] = ' '.join(__SCREAMING_SNAKE_CASE )
# make sure @@ tokens are concatenated
__lowerCamelCase : Any = ''.join(string.split(__SCREAMING_SNAKE_CASE ) )
return string
def _lowercase ( self : List[str] , _a : Tuple , _a : int = None ) -> Tuple[str]:
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__lowerCamelCase : Any = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
__lowerCamelCase : str = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(__SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__SCREAMING_SNAKE_CASE , ensure_ascii=__SCREAMING_SNAKE_CASE ) + '\n' )
__lowerCamelCase : Optional[Any] = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(__SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _a : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!' )
__lowerCamelCase : Optional[Any] = token_index
writer.write(' '.join(__SCREAMING_SNAKE_CASE ) + '\n' )
index += 1
return (vocab_file, merges_file)
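# Worked example of `get_pairs`, which feeds the BPE merge loop above: a word
# is a tuple of symbols whose last symbol carries the "</w>" end marker.
assert get_pairs(('h', 'e', 'l', 'l', 'o</w>') ) == {
    ('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o</w>')}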
| 208 | from collections import defaultdict
from math import ceil, sqrt
def solution( t_limit: int = 1000000 , n_limit: int = 10 ) -> int:
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(f'{solution() = }')
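# Sanity check of the lamina arithmetic in `solution`: a 5-wide square with a
# 1-wide hole uses 5*5 - 1*1 = 24 tiles, and a 4-wide square with a 2-wide hole
# uses 4*4 - 2*2 = 12; outer and hole widths must share parity, which is what
# the `(outer_width - hole_width_lower_bound) % 2` adjustment enforces.
assert 5 * 5 - 1 * 1 == 24 and 4 * 4 - 2 * 2 == 12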
| 338 | 0 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
def __init__(self , A , A=1_3 , A=3_2 , A=3 , A=4 , A=[1_0, 2_0, 3_0, 4_0] , A=[2, 2, 3, 2] , A=True , A=True , A=3_7 , A="gelu" , A=1_0 , A=0.02 , A=["stage2", "stage3", "stage4"] , A=3 , A=None , ):
lowerCamelCase_ : Any = parent
lowerCamelCase_ : Optional[Any] = batch_size
lowerCamelCase_ : List[Any] = image_size
lowerCamelCase_ : Tuple = num_channels
lowerCamelCase_ : List[str] = num_stages
lowerCamelCase_ : Optional[Any] = hidden_sizes
lowerCamelCase_ : Optional[int] = depths
lowerCamelCase_ : Optional[Any] = is_training
lowerCamelCase_ : Any = use_labels
lowerCamelCase_ : Optional[Any] = intermediate_size
lowerCamelCase_ : List[Any] = hidden_act
lowerCamelCase_ : Tuple = type_sequence_label_size
lowerCamelCase_ : str = initializer_range
lowerCamelCase_ : Union[str, Any] = out_features
lowerCamelCase_ : Optional[Any] = num_labels
lowerCamelCase_ : Tuple = scope
lowerCamelCase_ : List[Any] = num_stages
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ : int = None
if self.use_labels:
lowerCamelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ : int = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ (self ):
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def UpperCAmelCase__ (self ):
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=5_1_2 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=__SCREAMING_SNAKE_CASE , auxiliary_loss_weight=0.4 , auxiliary_in_channels=4_0 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=__SCREAMING_SNAKE_CASE , loss_ignore_index=2_5_5 , num_labels=self.num_labels , )
def UpperCAmelCase__ (self , A , A , A ):
lowerCamelCase_ : Any = UperNetForSemanticSegmentation(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowerCamelCase_ : str = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = self.prepare_config_and_inputs()
(
(
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
),
) : List[Any] = config_and_inputs
lowerCamelCase_ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UperNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp(self ):
        self.model_tester = UperNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=UperNetConfig , has_text_modality=False , hidden_size=3_7 )
def UpperCAmelCase__ (self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ (self ):
return
def UpperCAmelCase__ (self ):
lowerCamelCase_, lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ : int = [*signature.parameters.keys()]
lowerCamelCase_ : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def UpperCAmelCase__ (self ):
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def UpperCAmelCase__ (self ):
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def UpperCAmelCase__ (self ):
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def UpperCAmelCase__ (self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def UpperCAmelCase__ (self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase__ (self ):
pass
def UpperCAmelCase__ (self ):
def check_hidden_states_output(A , A , A ):
lowerCamelCase_ : List[Any] = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowerCamelCase_ : Tuple = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
lowerCamelCase_ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase_ : Any = self.model_tester.num_stages
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase_, lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : Dict = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ : Optional[Any] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ (self ):
lowerCamelCase_, lowerCamelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ : Optional[int] = _config_zero_init(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ : List[str] = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
lowerCamelCase_ : str = model_class(config=__SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip(reason='''UperNet does not have tied weights''' )
def UpperCAmelCase__ (self ):
pass
@slow
def UpperCAmelCase__ (self ):
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ : int = UperNetForSemanticSegmentation.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def prepare_img():
    '''simple docstring'''
    filepath = hf_hub_download(
        repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
    image = Image.open(filepath ).convert('''RGB''' )
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest( unittest.TestCase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
lowerCamelCase_ : Any = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ : Optional[Any] = prepare_img()
lowerCamelCase_ : Any = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
with torch.no_grad():
lowerCamelCase_ : Optional[Any] = model(**__SCREAMING_SNAKE_CASE )
lowerCamelCase_ : int = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ : Any = torch.tensor(
[[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 318 | import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # text files are always read as a single "string" column by default
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
| 338 | 0 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 233 | def decimal_to_binary(num):
    """Convert an integer to its binary string representation (e.g. 40 -> "0b101000")."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
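    # Hedged usage sketch: the expected strings below follow from the logic
    # above; the name `decimal_to_binary` is this edit's renaming, not the
    # original module's identifier.
    assert decimal_to_binary(0) == "0b0"
    assert decimal_to_binary(40) == "0b101000"
    assert decimal_to_binary(-40) == "-0b101000"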
| 338 | 0 |
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) ) | 297 | class lowercase_ :
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Any:
lowerCAmelCase = name
lowerCAmelCase = value
lowerCAmelCase = weight
def __repr__( self ) ->str:
return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
return self.value
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
return self.name
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
return self.weight
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
return self.value / self.weight
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> int:
lowerCAmelCase = []
for i in range(len(snake_case__ ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
lowerCAmelCase = sorted(snake_case__ , key=snake_case__ , reverse=snake_case__ )
lowerCAmelCase = []
lowerCAmelCase , lowerCAmelCase = 0.0, 0.0
for i in range(len(snake_case__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
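# Hedged example run (the menu contents are illustrative, not from the original):
# foods = build_menu(["Burger", "Pizza", "Coke"], [80, 100, 60], [40, 10, 20])
# print(greedy(foods, 60, Things.get_value))  # picks highest-value items within cost 60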
| 338 | 0 |
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger

logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verification of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verification."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file are missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The split sizes don't match the expected split sizes."""


def verify_splits(expected_splits, recorded_splits):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path, record_checksum=True):
    """Compute the file size and (optionally) the sha256 checksum of a file, read in 1 MiB chunks."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    """Whether `dataset_size` fits under the configured in-memory maximum."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
return False
| 240 | import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
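    # Hedged sketch of the two compositions named above; R1/R2 are illustrative
    # fuzzy relation matrices that are not part of the original script.
    # R1 = np.array([[0.2, 0.8], [0.6, 0.4]])
    # R2 = np.array([[0.5, 0.9], [0.3, 0.7]])
    # max_min = fuzz.maxmin_composition(R1, R2)
    # max_prod = fuzz.maxprod_composition(R1, R2)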
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 338 | 0 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""
        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
"""
        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_no_internet(self):
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""
        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
"""
        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
"""
        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
"""
        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
"""
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = """
from transformers import pipeline
"""
        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
"""
        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
"""
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )
    @require_torch
    def test_offline_model_dynamic_model(self):
        load = """
from transformers import AutoModel
"""
        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
"""
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
| 296 | import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
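# Hedged sketch of the denoising loop the full-loop tests above exercise;
# `model` and `sample` are illustrative stand-ins, not defined in this file.
# scheduler = DDPMScheduler(num_train_timesteps=1000)
# for t in reversed(range(len(scheduler))):
#     residual = model(sample, t)
#     sample = scheduler.step(residual, t, sample).prev_sample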
| 338 | 0 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]]),
torch.tensor([[0, 0, 0, 1069, 11]]),
torch.tensor([[0, 0, 0, 1069, 11]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
@require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]]),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]]),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
| 13 | import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)


class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not"
                    " exist, no preloaded speaker embeddings will be used - Make sure to provide a correct path to"
                    " the json dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)
        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub=False,
        **kwargs,
    ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset=None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}`"
                    f" does not exist, no preloaded voice preset will be used - Make sure to provide correct paths"
                    f" to the {voice_preset} embeddings."
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset=None, **kwargs):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")
            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
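# Hedged usage sketch: "suno/bark-small" and the preset name are public Bark
# artifacts, but they are illustrative here, not referenced by this file.
# processor = BarkProcessor.from_pretrained("suno/bark-small")
# inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")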
| 338 | 0 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
__UpperCamelCase : Union[str, Any] = tmp_path / "cache"
__UpperCamelCase : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__UpperCamelCase : List[str] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
if issubclass(snake_case__ , snake_case__ ):
__UpperCamelCase : Union[str, Any] = parquet_path
elif issubclass(snake_case__ , snake_case__ ):
__UpperCamelCase : Tuple = [parquet_path]
__UpperCamelCase : Optional[int] = tmp_path / "cache"
__UpperCamelCase : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__UpperCamelCase : List[str] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
if split:
__UpperCamelCase : Optional[int] = {split: parquet_path}
else:
__UpperCamelCase : Optional[Any] = "train"
__UpperCamelCase : Tuple = {"train": parquet_path, "test": parquet_path}
__UpperCamelCase : Optional[int] = tmp_path / "cache"
__UpperCamelCase : List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__UpperCamelCase : List[str] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
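# Hedged usage sketch of the reader/writer round trip these tests exercise
# (the file name and column values are illustrative):
# ds = Dataset.from_dict({"col_1": ["a", "b", "c", "d"]})
# ParquetDatasetWriter(ds, "out.parquet").write()      # returns the number of bytes written
# reloaded = ParquetDatasetReader("out.parquet").read()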
| 298 | import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 338 | 0 |
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
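    # Note (assumption based on the public ZenQuotes API, not verified here):
    # the response is a list of objects with "q" (quote), "a" (author) and
    # "h" (HTML) keys, e.g. response[0]["q"] is the quote text.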
| 201 | import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        black_mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=black_mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
lowerCAmelCase = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
lowerCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
lowerCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
lowerCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list'''] )
self.assertFalse(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list'''] )
        # Check whether the number of models matches the original README.md after conversion.
self.assertTrue(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
lowerCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
lowerCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list'''] )
# Check if the model link is synchronized.
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 338 | 0 |
class _A :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = name
SCREAMING_SNAKE_CASE_ : str = value
SCREAMING_SNAKE_CASE_ : Optional[int] = weight
def __repr__( self ):
"""simple docstring"""
return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
def UpperCAmelCase ( self ):
"""simple docstring"""
return self.value
def UpperCAmelCase ( self ):
"""simple docstring"""
return self.name
def UpperCAmelCase ( self ):
"""simple docstring"""
return self.weight
def UpperCAmelCase ( self ):
"""simple docstring"""
return self.value / self.weight
def A_ ( a , a , a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
for i in range(len(snake_case__ ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def A_ ( a , a , a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = sorted(snake_case__ , key=snake_case__ , reverse=snake_case__ )
SCREAMING_SNAKE_CASE_ : int = []
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0.0, 0.0
for i in range(len(snake_case__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def A_ ( ):
"""simple docstring"""
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
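# Minimal usage sketch, added for reference only (the placeholder renames above
# collapse both helpers into the name A_ and drop the self.* assignments, so
# this shows the intended, de-obfuscated API; build_menu and greedy are
# illustrative names):
#   menu = build_menu(["apple", "banana"], [50, 30], [10, 5])  # names, values, weights
#   chosen, total_value = greedy(menu, max_cost=12, key_func=Things.get_value)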
| 253 | import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_3_3_7 , num_examples=4_2 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_3_3_7 , num_examples=4_2 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]:
lowerCAmelCase = split_dict._to_yaml_list()
assert len(snake_case__ ) == len(snake_case__ )
lowerCAmelCase = SplitDict._from_yaml_list(snake_case__ )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
lowerCAmelCase = None
# the split name of split_dict takes over the name of the split info object
lowerCAmelCase = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'''split_info''' , [SplitInfo(), SplitInfo(dataset_name=snake_case__ ), SplitInfo(dataset_name='''my_dataset''' )] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Optional[int]:
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
lowerCAmelCase = asdict(SplitDict({'''train''': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 338 | 0 |
'''simple docstring'''
def snake_case_ (_a : Union[str, Any] , _a : Dict ):
UpperCAmelCase = len(snake_case__ ) + 1
UpperCAmelCase = len(snake_case__ ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
UpperCAmelCase = [[0 for i in range(snake_case__ )] for j in range(snake_case__ )]
# since string of zero length match pattern of zero length
UpperCAmelCase = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , snake_case__ ):
UpperCAmelCase = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , snake_case__ ):
UpperCAmelCase = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , snake_case__ ):
for j in range(1 , snake_case__ ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
UpperCAmelCase = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
UpperCAmelCase = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
UpperCAmelCase = dp[i - 1][j]
else:
UpperCAmelCase = 0
else:
UpperCAmelCase = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
    # inputting the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
A ='''aab'''
A ='''c*a*b'''
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f"""{input_string} matches the given pattern {pattern}""")
else:
print(f"""{input_string} does not match with the given pattern {pattern}""")
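# Complexity note: the DP table has (len(input_string) + 1) * (len(pattern) + 1)
# cells, each filled in O(1), so matching runs in O(n * m) time and space.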
| 34 | import unittest
import numpy as np
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , ) -> np.ndarray:
lowerCAmelCase = np.shape(snake_case__ )
lowerCAmelCase = np.shape(snake_case__ )
lowerCAmelCase = np.shape(snake_case__ )
if shape_a[0] != shape_b[0]:
lowerCAmelCase = (
'''Expected the same number of rows for A and B. '''
f"Instead found A of size {shape_a} and B of size {shape_b}"
)
raise ValueError(snake_case__ )
if shape_b[1] != shape_c[1]:
lowerCAmelCase = (
'''Expected the same number of columns for B and C. '''
f"Instead found B of size {shape_b} and C of size {shape_c}"
)
raise ValueError(snake_case__ )
lowerCAmelCase = pseudo_inv
if a_inv is None:
try:
lowerCAmelCase = np.linalg.inv(snake_case__ )
except np.linalg.LinAlgError:
raise ValueError(
'''Input matrix A is not invertible. Cannot compute Schur complement.''' )
return mat_c - mat_b.T @ a_inv @ mat_b
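def _schur_demo() -> None:
    # Illustrative numerical check, added for reference; it calls the function by
    # its conventional name, schur_complement, as the tests below do. For the
    # block matrix M = [[A, B], [B.T, C]], det(M) equals det(A) * det(S), where S
    # is the Schur complement of A computed above. _schur_demo is hypothetical.
    a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
    b = np.array([[0, 3], [3, 0], [2, 3]])
    c = np.array([[2, 1], [6, 3]])
    s = schur_complement(a, b, c)
    m = np.block([[a, b], [b.T, c]])
    assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))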
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self ) ->None:
lowerCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowerCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
lowerCAmelCase = np.array([[2, 1], [6, 3]] )
lowerCAmelCase = schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.block([[a, b], [b.T, c]] )
lowerCAmelCase = np.linalg.det(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.linalg.det(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.linalg.det(__SCREAMING_SNAKE_CASE )
self.assertAlmostEqual(__SCREAMING_SNAKE_CASE , det_a * det_s )
def SCREAMING_SNAKE_CASE_ ( self ) ->None:
lowerCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowerCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
lowerCAmelCase = np.array([[2, 1], [6, 3]] )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->None:
lowerCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowerCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
lowerCAmelCase = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 338 | 0 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
_UpperCamelCase = sys.version_info >= (3, 10)
def a_ ( _lowerCAmelCase=None ,_lowerCAmelCase=None ) -> Any:
return field(default_factory=lambda: default ,metadata=snake_case__ )
@dataclass
class lowerCamelCase_ :
"""simple docstring"""
a_ =42
a_ =42
a_ =42
a_ =42
@dataclass
class lowerCamelCase_ :
"""simple docstring"""
a_ =42
a_ =field(default="""toto""" , metadata={"""help""": """help message"""} )
@dataclass
class lowerCamelCase_ :
"""simple docstring"""
a_ =False
a_ =True
a_ =None
class lowerCamelCase_ ( UpperCamelCase_ ):
"""simple docstring"""
a_ ="""titi"""
a_ ="""toto"""
class lowerCamelCase_ ( UpperCamelCase_ ):
"""simple docstring"""
a_ ="""titi"""
a_ ="""toto"""
a_ =42
@dataclass
class lowerCamelCase_ :
"""simple docstring"""
a_ ="toto"
def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
__lowerCamelCase : Dict = BasicEnum(self.foo )
@dataclass
class lowerCamelCase_ :
"""simple docstring"""
a_ ="toto"
def _lowercase ( self : str ) -> Tuple:
__lowerCamelCase : Dict = MixedTypeEnum(self.foo )
@dataclass
class lowerCamelCase_ :
"""simple docstring"""
a_ =None
a_ =field(default=UpperCamelCase_ , metadata={"""help""": """help message"""} )
a_ =None
a_ =list_field(default=[] )
a_ =list_field(default=[] )
@dataclass
class lowerCamelCase_ :
"""simple docstring"""
a_ =list_field(default=[] )
a_ =list_field(default=[1, 2, 3] )
a_ =list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
a_ =list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class lowerCamelCase_ :
"""simple docstring"""
a_ =field()
a_ =field()
a_ =field()
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
__lowerCamelCase : Union[str, Any] = BasicEnum(self.required_enum )
@dataclass
class lowerCamelCase_ :
"""simple docstring"""
a_ =42
a_ =field()
a_ =None
a_ =field(default="""toto""" , metadata={"""help""": """help message"""} )
a_ =list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
if is_python_no_less_than_3_10:
@dataclass
class lowerCamelCase_ :
"""simple docstring"""
a_ =False
a_ =True
a_ =None
@dataclass
class lowerCamelCase_ :
"""simple docstring"""
a_ =None
a_ =field(default=UpperCamelCase_ , metadata={"""help""": """help message"""} )
a_ =None
a_ =list_field(default=[] )
a_ =list_field(default=[] )
class lowerCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Union[str, Any] , _a : Union[str, Any] , _a : int ) -> int:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
__lowerCamelCase : str = {k: v for k, v in vars(__SCREAMING_SNAKE_CASE ).items() if k != 'container'}
__lowerCamelCase : Optional[Any] = {k: v for k, v in vars(__SCREAMING_SNAKE_CASE ).items() if k != 'container'}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , __SCREAMING_SNAKE_CASE ) and yy.get('choices' , __SCREAMING_SNAKE_CASE ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](__SCREAMING_SNAKE_CASE ) , yy['type'](__SCREAMING_SNAKE_CASE ) )
del xx["type"], yy["type"]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( self : Any ) -> Tuple:
__lowerCamelCase : Optional[Any] = HfArgumentParser(__SCREAMING_SNAKE_CASE )
__lowerCamelCase : List[str] = argparse.ArgumentParser()
expected.add_argument('--foo' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
expected.add_argument('--bar' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
expected.add_argument('--baz' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
expected.add_argument('--flag' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , const=__SCREAMING_SNAKE_CASE , nargs='?' )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__lowerCamelCase : Dict = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
        (__lowerCamelCase ,) = parser.parse_args_into_dataclasses(__SCREAMING_SNAKE_CASE , look_for_args_file=__SCREAMING_SNAKE_CASE )
self.assertFalse(example.flag )
def _lowercase ( self : Optional[int] ) -> Any:
__lowerCamelCase : int = HfArgumentParser(__SCREAMING_SNAKE_CASE )
__lowerCamelCase : Optional[int] = argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=__SCREAMING_SNAKE_CASE )
expected.add_argument('--baz' , default='toto' , type=__SCREAMING_SNAKE_CASE , help='help message' )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( self : Any ) -> List[str]:
__lowerCamelCase : str = argparse.ArgumentParser()
expected.add_argument('--foo' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , const=__SCREAMING_SNAKE_CASE , nargs='?' )
expected.add_argument('--baz' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , const=__SCREAMING_SNAKE_CASE , nargs='?' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=__SCREAMING_SNAKE_CASE , dest='baz' )
expected.add_argument('--opt' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE )
__lowerCamelCase : Optional[int] = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__SCREAMING_SNAKE_CASE )
for dataclass_type in dataclass_types:
__lowerCamelCase : int = HfArgumentParser(__SCREAMING_SNAKE_CASE )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__lowerCamelCase : int = parser.parse_args([] )
self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) )
__lowerCamelCase : str = parser.parse_args(['--foo', '--no_baz'] )
self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) )
__lowerCamelCase : Union[str, Any] = parser.parse_args(['--foo', '--baz'] )
self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) )
__lowerCamelCase : Tuple = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] )
self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) )
__lowerCamelCase : Tuple = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] )
self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) )
def _lowercase ( self : str ) -> List[str]:
__lowerCamelCase : Optional[int] = HfArgumentParser(__SCREAMING_SNAKE_CASE )
__lowerCamelCase : Dict = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__lowerCamelCase : str = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
__lowerCamelCase : int = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
__lowerCamelCase : Optional[int] = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
__lowerCamelCase : Union[str, Any] = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
__lowerCamelCase : List[str] = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
__lowerCamelCase : Dict = parser.parse_args_into_dataclasses(['--foo', '42'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def _lowercase ( self : Union[str, Any] ) -> Dict:
@dataclass
class lowerCamelCase_ :
"""simple docstring"""
a_ ="toto"
__lowerCamelCase : int = HfArgumentParser(__SCREAMING_SNAKE_CASE )
__lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__lowerCamelCase : str = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
__lowerCamelCase : Union[str, Any] = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
__lowerCamelCase : Union[str, Any] = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
def _lowercase ( self : int ) -> List[Any]:
__lowerCamelCase : Tuple = HfArgumentParser(__SCREAMING_SNAKE_CASE )
__lowerCamelCase : List[Any] = argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=__SCREAMING_SNAKE_CASE )
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=__SCREAMING_SNAKE_CASE )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=__SCREAMING_SNAKE_CASE )
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=__SCREAMING_SNAKE_CASE )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__lowerCamelCase : str = parser.parse_args([] )
self.assertEqual(
__SCREAMING_SNAKE_CASE , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )
__lowerCamelCase : List[str] = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )
def _lowercase ( self : Dict ) -> Optional[int]:
__lowerCamelCase : Tuple = argparse.ArgumentParser()
expected.add_argument('--foo' , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE )
expected.add_argument('--bar' , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help='help message' )
expected.add_argument('--baz' , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE )
expected.add_argument('--ces' , nargs='+' , default=[] , type=__SCREAMING_SNAKE_CASE )
expected.add_argument('--des' , nargs='+' , default=[] , type=__SCREAMING_SNAKE_CASE )
__lowerCamelCase : int = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__SCREAMING_SNAKE_CASE )
for dataclass_type in dataclass_types:
__lowerCamelCase : Optional[int] = HfArgumentParser(__SCREAMING_SNAKE_CASE )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__lowerCamelCase : Any = parser.parse_args([] )
self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , bar=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , ces=[] , des=[] ) )
__lowerCamelCase : Tuple = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )
def _lowercase ( self : List[Any] ) -> Optional[Any]:
__lowerCamelCase : Optional[Any] = HfArgumentParser(__SCREAMING_SNAKE_CASE )
__lowerCamelCase : int = argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
expected.add_argument('--required_str' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=__SCREAMING_SNAKE_CASE , )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( self : str ) -> List[Any]:
__lowerCamelCase : Optional[int] = HfArgumentParser(__SCREAMING_SNAKE_CASE )
__lowerCamelCase : Optional[int] = argparse.ArgumentParser()
expected.add_argument('--foo' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=__SCREAMING_SNAKE_CASE , )
expected.add_argument('--opt' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE )
expected.add_argument('--baz' , default='toto' , type=__SCREAMING_SNAKE_CASE , help='help message' )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=__SCREAMING_SNAKE_CASE )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( self : List[Any] ) -> Union[str, Any]:
__lowerCamelCase : Union[str, Any] = HfArgumentParser(__SCREAMING_SNAKE_CASE )
__lowerCamelCase : Optional[Any] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
__lowerCamelCase : Tuple = parser.parse_dict(__SCREAMING_SNAKE_CASE )[0]
__lowerCamelCase : int = BasicExample(**__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( self : str ) -> Dict:
__lowerCamelCase : List[str] = HfArgumentParser(__SCREAMING_SNAKE_CASE )
__lowerCamelCase : Optional[int] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(__SCREAMING_SNAKE_CASE , parser.parse_dict , __SCREAMING_SNAKE_CASE , allow_extra_keys=__SCREAMING_SNAKE_CASE )
def _lowercase ( self : Optional[Any] ) -> int:
__lowerCamelCase : Optional[int] = HfArgumentParser(__SCREAMING_SNAKE_CASE )
__lowerCamelCase : int = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCamelCase : List[str] = os.path.join(__SCREAMING_SNAKE_CASE , 'temp_json' )
os.mkdir(__SCREAMING_SNAKE_CASE )
with open(temp_local_path + '.json' , 'w+' ) as f:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__lowerCamelCase : List[Any] = parser.parse_yaml_file(Path(temp_local_path + '.json' ) )[0]
__lowerCamelCase : List[str] = BasicExample(**__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
__lowerCamelCase : int = HfArgumentParser(__SCREAMING_SNAKE_CASE )
__lowerCamelCase : Union[str, Any] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCamelCase : List[Any] = os.path.join(__SCREAMING_SNAKE_CASE , 'temp_yaml' )
os.mkdir(__SCREAMING_SNAKE_CASE )
with open(temp_local_path + '.yaml' , 'w+' ) as f:
yaml.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__lowerCamelCase : str = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]
__lowerCamelCase : Optional[Any] = BasicExample(**__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( self : str ) -> Union[str, Any]:
__lowerCamelCase : str = HfArgumentParser(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
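def _hf_argparser_demo() -> None:
    # Minimal, self-contained sketch of the pattern exercised above (hypothetical
    # dataclass and helper name, added for reference): HfArgumentParser derives an
    # argparse CLI from dataclass fields and parses argv back into instances.
    @dataclass
    class SketchArgs:
        foo: int
        bar: float = 3.14

    parser = HfArgumentParser(SketchArgs)
    (args,) = parser.parse_args_into_dataclasses(["--foo", "1"])
    assert args.foo == 1 and args.bar == 3.14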
| 208 | import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
lowercase__ : Any = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> str:
lowerCAmelCase = ['''layers''', '''blocks''']
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
lowercase__ : List[Any] = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]:
lowerCAmelCase = list(s_dict.keys() )
for key in keys:
lowerCAmelCase = key
for k, v in WHISPER_MAPPING.items():
if k in key:
lowerCAmelCase = new_key.replace(snake_case__ , snake_case__ )
print(f"{key} -> {new_key}" )
lowerCAmelCase = s_dict.pop(snake_case__ )
return s_dict
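# For reference, one concrete key produced by the substitutions above:
#   "decoder.blocks.0.mlp.0.weight" -> "decoder.layers.0.fc1.weight"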
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]:
lowerCAmelCase , lowerCAmelCase = emb.weight.shape
lowerCAmelCase = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ )
lowerCAmelCase = emb.weight.data
return lin_layer
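# The returned linear layer reuses the embedding matrix as its weight, which is
# how the LM head gets tied to decoder.embed_tokens below when tie_embeds is set.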
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> bytes:
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowerCAmelCase = os.path.basename(snake_case__ )
lowerCAmelCase = url.split('''/''' )[-2]
lowerCAmelCase = os.path.join(snake_case__ , snake_case__ )
if os.path.exists(snake_case__ ) and not os.path.isfile(snake_case__ ):
raise RuntimeError(f"{download_target} exists and is not a regular file" )
if os.path.isfile(snake_case__ ):
lowerCAmelCase = open(snake_case__ , '''rb''' ).read()
        if hashlib.sha256(snake_case__ ).hexdigest() == expected_sha256:
return model_bytes
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" )
with urllib.request.urlopen(snake_case__ ) as source, open(snake_case__ , '''wb''' ) as output:
with tqdm(
total=int(source.info().get('''Content-Length''' ) ) , ncols=8_0 , unit='''iB''' , unit_scale=snake_case__ , unit_divisor=1_0_2_4 ) as loop:
while True:
lowerCAmelCase = source.read(8_1_9_2 )
if not buffer:
break
output.write(snake_case__ )
loop.update(len(snake_case__ ) )
lowerCAmelCase = open(snake_case__ , '''rb''' ).read()
    if hashlib.sha256(snake_case__ ).hexdigest() != expected_sha256:
raise RuntimeError(
            '''Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.''' )
return model_bytes
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> str:
if ".pt" not in checkpoint_path:
lowerCAmelCase = _download(_MODELS[checkpoint_path] )
else:
lowerCAmelCase = torch.load(snake_case__ , map_location='''cpu''' )
lowerCAmelCase = original_checkpoint['''dims''']
lowerCAmelCase = original_checkpoint['''model_state_dict''']
lowerCAmelCase = state_dict['''decoder.token_embedding.weight''']
remove_ignore_keys_(snake_case__ )
rename_keys(snake_case__ )
lowerCAmelCase = True
lowerCAmelCase = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]
lowerCAmelCase = WhisperConfig(
        vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=snake_case__ , decoder_ffn_dim=snake_case__ , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_head'''] , max_source_positions=dimensions['''n_audio_ctx'''] , )
lowerCAmelCase = WhisperForConditionalGeneration(snake_case__ )
lowerCAmelCase , lowerCAmelCase = model.model.load_state_dict(snake_case__ , strict=snake_case__ )
if len(snake_case__ ) > 0 and not set(snake_case__ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
f" but all the following weights are missing {missing}" )
if tie_embeds:
lowerCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
lowerCAmelCase = proj_out_weights
model.save_pretrained(snake_case__ )
if __name__ == "__main__":
lowercase__ : List[str] = argparse.ArgumentParser()
# # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowercase__ : int = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
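# Example invocation of this script (hypothetical output path; a bare model name
# such as "tiny" is resolved to a download via the _MODELS table above):
#   python <this_script>.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny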
| 338 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Union[str, Any] = logging.get_logger(__name__)
def lowercase_ ( _lowercase ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = DPTConfig(embedding_type='''hybrid''' )
if "large" in checkpoint_url:
lowerCamelCase_ : List[str] = 1_024
lowerCamelCase_ : Optional[int] = 4_096
lowerCamelCase_ : Optional[Any] = 24
lowerCamelCase_ : Any = 16
lowerCamelCase_ : Any = [5, 11, 17, 23]
lowerCamelCase_ : Optional[int] = [256, 512, 1_024, 1_024]
lowerCamelCase_ : Optional[int] = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
lowerCamelCase_ : Optional[Any] = 768
lowerCamelCase_ : int = [1, 1, 1, 0.5]
lowerCamelCase_ : List[str] = [256, 512, 768, 768]
lowerCamelCase_ : List[str] = 150
lowerCamelCase_ : Any = 16
lowerCamelCase_ : Tuple = (1, 384, 384)
lowerCamelCase_ : Optional[int] = False
lowerCamelCase_ : List[str] = '''project'''
if "ade" in checkpoint_url:
lowerCamelCase_ : Any = True
lowerCamelCase_ : Union[str, Any] = 768
lowerCamelCase_ : Any = [1, 1, 1, 0.5]
lowerCamelCase_ : Tuple = 150
lowerCamelCase_ : List[str] = 16
lowerCamelCase_ : str = '''huggingface/label-files'''
lowerCamelCase_ : Tuple = '''ade20k-id2label.json'''
lowerCamelCase_ : Any = json.load(open(cached_download(hf_hub_url(snake_case__ , snake_case__ , repo_type='''dataset''' ) ) , '''r''' ) )
lowerCamelCase_ : Union[str, Any] = {int(snake_case__ ): v for k, v in idalabel.items()}
lowerCamelCase_ : Optional[Any] = idalabel
lowerCamelCase_ : Optional[int] = {v: k for k, v in idalabel.items()}
lowerCamelCase_ : Optional[int] = [1, 150, 480, 480]
return config, expected_shape
def lowercase_ ( _lowercase ) -> Any:
'''simple docstring'''
lowerCamelCase_ : Tuple = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def lowercase_ ( _lowercase ) -> Dict:
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowerCamelCase_ : List[str] = name.replace('''pretrained.model''' , '''dpt.encoder''' )
if "pretrained.model" in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''pretrained.model''' , '''dpt.embeddings''' )
if "patch_embed" in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''patch_embed''' , '''''' )
if "pos_embed" in name:
lowerCamelCase_ : int = name.replace('''pos_embed''' , '''position_embeddings''' )
if "attn.proj" in name:
lowerCamelCase_ : List[str] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "proj" in name and "project" not in name:
lowerCamelCase_ : Optional[int] = name.replace('''proj''' , '''projection''' )
if "blocks" in name:
lowerCamelCase_ : Any = name.replace('''blocks''' , '''layer''' )
if "mlp.fc1" in name:
lowerCamelCase_ : List[str] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowerCamelCase_ : Dict = name.replace('''mlp.fc2''' , '''output.dense''' )
if "norm1" in name and "backbone" not in name:
lowerCamelCase_ : List[str] = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name and "backbone" not in name:
lowerCamelCase_ : List[Any] = name.replace('''norm2''' , '''layernorm_after''' )
if "scratch.output_conv" in name:
lowerCamelCase_ : str = name.replace('''scratch.output_conv''' , '''head''' )
if "scratch" in name:
lowerCamelCase_ : Dict = name.replace('''scratch''' , '''neck''' )
if "layer1_rn" in name:
lowerCamelCase_ : Optional[int] = name.replace('''layer1_rn''' , '''convs.0''' )
if "layer2_rn" in name:
lowerCamelCase_ : Tuple = name.replace('''layer2_rn''' , '''convs.1''' )
if "layer3_rn" in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''layer3_rn''' , '''convs.2''' )
if "layer4_rn" in name:
lowerCamelCase_ : Optional[Any] = name.replace('''layer4_rn''' , '''convs.3''' )
if "refinenet" in name:
lowerCamelCase_ : List[str] = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowerCamelCase_ : Optional[Any] = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
lowerCamelCase_ : Tuple = name.replace('''out_conv''' , '''projection''' )
if "resConfUnit1" in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''resConfUnit1''' , '''residual_layer1''' )
if "resConfUnit2" in name:
lowerCamelCase_ : Tuple = name.replace('''resConfUnit2''' , '''residual_layer2''' )
if "conv1" in name:
lowerCamelCase_ : Optional[int] = name.replace('''conv1''' , '''convolution1''' )
if "conv2" in name:
lowerCamelCase_ : int = name.replace('''conv2''' , '''convolution2''' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowerCamelCase_ : Tuple = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
if "pretrained.act_postprocess2.0.project.0" in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
if "pretrained.act_postprocess3.0.project.0" in name:
lowerCamelCase_ : List[str] = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
if "pretrained.act_postprocess4.0.project.0" in name:
lowerCamelCase_ : Any = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowerCamelCase_ : Tuple = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
if "pretrained.act_postprocess1.4" in name:
lowerCamelCase_ : List[str] = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
if "pretrained.act_postprocess2.3" in name:
lowerCamelCase_ : Optional[Any] = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
if "pretrained.act_postprocess2.4" in name:
lowerCamelCase_ : str = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
if "pretrained.act_postprocess3.3" in name:
lowerCamelCase_ : Any = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
if "pretrained.act_postprocess4.3" in name:
lowerCamelCase_ : Optional[int] = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
if "pretrained.act_postprocess4.4" in name:
lowerCamelCase_ : List[str] = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
if "pretrained" in name:
lowerCamelCase_ : Optional[int] = name.replace('''pretrained''' , '''dpt''' )
if "bn" in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''bn''' , '''batch_norm''' )
if "head" in name:
lowerCamelCase_ : Any = name.replace('''head''' , '''head.head''' )
if "encoder.norm" in name:
lowerCamelCase_ : List[str] = name.replace('''encoder.norm''' , '''layernorm''' )
if "auxlayer" in name:
lowerCamelCase_ : List[Any] = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
if "backbone" in name:
lowerCamelCase_ : Dict = name.replace('''backbone''' , '''backbone.bit.encoder''' )
if ".." in name:
lowerCamelCase_ : Optional[int] = name.replace('''..''' , '''.''' )
if "stem.conv" in name:
lowerCamelCase_ : Optional[int] = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
lowerCamelCase_ : Dict = name.replace('''blocks''' , '''layers''' )
if "convolution" in name and "backbone" in name:
lowerCamelCase_ : str = name.replace('''convolution''' , '''conv''' )
if "layer" in name and "backbone" in name:
lowerCamelCase_ : Optional[Any] = name.replace('''layer''' , '''layers''' )
if "backbone.bit.encoder.bit" in name:
lowerCamelCase_ : Any = name.replace('''backbone.bit.encoder.bit''' , '''backbone.bit''' )
if "embedder.conv" in name:
lowerCamelCase_ : List[Any] = name.replace('''embedder.conv''' , '''embedder.convolution''' )
if "backbone.bit.encoder.stem.norm" in name:
lowerCamelCase_ : Dict = name.replace('''backbone.bit.encoder.stem.norm''' , '''backbone.bit.embedder.norm''' )
return name
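# For reference, one concrete key produced by the intended sequence of renames
# above (assuming each branch rebinds `name`, as in the upstream script):
#   "pretrained.model.blocks.0.attn.proj.weight"
#       -> "dpt.encoder.layer.0.attention.output.dense.weight"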
def lowercase_ ( _lowercase , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase_ : Any = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
lowerCamelCase_ : Union[str, Any] = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase_ : Tuple = in_proj_weight[: config.hidden_size, :]
lowerCamelCase_ : Any = in_proj_bias[: config.hidden_size]
lowerCamelCase_ : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase_ : Union[str, Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase_ : Any = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase_ : List[str] = in_proj_bias[-config.hidden_size :]
def lowercase_ ( ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ : List[str] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase_ : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
    lowerCamelCase_, lowerCamelCase_ = get_dpt_config(snake_case__ )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
lowerCamelCase_ : List[str] = torch.load(snake_case__ , map_location='''cpu''' )
# remove certain keys
remove_ignore_keys_(snake_case__ )
# rename keys
for key in state_dict.copy().keys():
lowerCamelCase_ : str = state_dict.pop(snake_case__ )
lowerCamelCase_ : str = val
# read in qkv matrices
read_in_q_k_v(snake_case__ , snake_case__ )
# load HuggingFace model
lowerCamelCase_ : int = DPTForSemanticSegmentation(snake_case__ ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# Check outputs on an image
lowerCamelCase_ : Union[str, Any] = 480 if '''ade''' in checkpoint_url else 384
lowerCamelCase_ : str = DPTImageProcessor(size=snake_case__ )
lowerCamelCase_ : str = prepare_img()
lowerCamelCase_ : Union[str, Any] = image_processor(snake_case__ , return_tensors='''pt''' )
# forward pass
lowerCamelCase_ : int = model(**snake_case__ ).logits if '''ade''' in checkpoint_url else model(**snake_case__ ).predicted_depth
if show_prediction:
lowerCamelCase_ : Optional[int] = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='''bicubic''' , align_corners=snake_case__ , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
image_processor.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
if __name__ == "__main__":
__lowercase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
parser.add_argument(
'''--show_prediction''',
action='''store_true''',
)
__lowercase : int = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
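# Example invocation of this script (hypothetical dump path; the default
# --checkpoint_url points at the DPT-large MiDaS weights):
#   python <this_script>.py --pytorch_dump_folder_path ./dpt-large --show_prediction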
| 318 | from ...processing_utils import ProcessorMixin
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = ["""image_processor""", """feature_extractor"""]
UpperCAmelCase_ : Optional[int] = """TvltImageProcessor"""
UpperCAmelCase_ : Optional[int] = """TvltFeatureExtractor"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Optional[int]:
super().__init__(image_processor=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = image_processor
lowerCAmelCase = feature_extractor
def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) ->List[Any]:
if images is None and audio is None:
raise ValueError('''You need to specify either an `images` or `audio` input to process.''' )
lowerCAmelCase = None
if images is not None:
lowerCAmelCase = self.image_processor(__SCREAMING_SNAKE_CASE , mask_pixel=__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if images_mixed is not None:
lowerCAmelCase = self.image_processor(__SCREAMING_SNAKE_CASE , is_mixed=__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if audio is not None:
lowerCAmelCase = self.feature_extractor(
__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , mask_audio=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowerCAmelCase = {}
if audio is not None:
output_dict.update(__SCREAMING_SNAKE_CASE )
if images is not None:
output_dict.update(__SCREAMING_SNAKE_CASE )
if images_mixed_dict is not None:
output_dict.update(__SCREAMING_SNAKE_CASE )
return output_dict
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
lowerCAmelCase = self.image_processor.model_input_names
lowerCAmelCase = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
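# Minimal usage sketch, added for reference (the class above is conventionally
# TvltProcessor; `video_frames` and `waveform` are placeholders for real inputs,
# e.g. a list of frame images and a 1-D numpy array, and the checkpoint name is
# illustrative):
#   processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#   inputs = processor(images=video_frames, audio=waveform, sampling_rate=44100)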
| 338 | 0 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 233 | def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> List[str]:
lowerCAmelCase = len(snake_case__ )
for i in range(length - 1 ):
lowerCAmelCase = i
for k in range(i + 1 , snake_case__ ):
if collection[k] < collection[least]:
lowerCAmelCase = k
if least != i:
lowerCAmelCase , lowerCAmelCase = (collection[i], collection[least])
return collection
if __name__ == "__main__":
lowercase__ : Optional[int] = input('''Enter numbers separated by a comma:\n''').strip()
lowercase__ : str = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
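# Non-interactive example, for reference: selection_sort([64, 25, 12, 22, 11])
# returns [11, 12, 22, 25, 64]; the sort works in place with O(n^2) comparisons.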
| 338 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
lowerCAmelCase: List[str] = logging.get_logger(__name__)
class a__( UpperCamelCase_ ):
def __init__( self : str , *__snake_case : Optional[int] , **__snake_case : List[Any] ):
warnings.warn(
'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use DeformableDetrImageProcessor instead.' , __SCREAMING_SNAKE_CASE , )
        super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
| 297 | import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class lowercase_ :
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=19 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.0_2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , ) ->Union[str, Any]:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__SCREAMING_SNAKE_CASE , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , )
return config
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Tuple:
lowerCAmelCase = EsmForProteinFolding(config=__SCREAMING_SNAKE_CASE ).float()
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
lowerCAmelCase = self.prepare_config_and_inputs()
        (
            lowerCAmelCase,
            lowerCAmelCase,
            lowerCAmelCase,
            lowerCAmelCase,
            lowerCAmelCase,
            lowerCAmelCase,
        ) = config_and_inputs
lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowercase_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : Dict = (EsmForProteinFolding,) if is_torch_available() else ()
UpperCAmelCase_ : List[Any] = ()
UpperCAmelCase_ : Tuple = {} if is_torch_available() else {}
UpperCAmelCase_ : List[str] = False
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
lowerCAmelCase = EsmFoldModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
@unittest.skip('''Does not support attention outputs''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
pass
@unittest.skip
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
pass
@unittest.skip('''ESMFold only has one output format.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
pass
@require_torch
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
lowerCAmelCase = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float()
model.eval()
lowerCAmelCase = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )['''positions''']
        lowerCAmelCase = torch.tensor([2.5_8_2_8, 0.7_9_9_3, -1_0.9_3_3_4] , dtype=torch.float32 )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 338 | 0 |