| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 82–54.1k) | int64 (0–699) | string (lengths 111–35.6k) | int64 (0–699) | int64 (0–1) |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    """Configuration class to store the configuration of a ViT MSN model."""

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
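A minimal sanity check for the configuration class above (values are illustrative):

# Defaults mirror ViT-Base; any keyword argument overrides the matching attribute.
config = ViTMSNConfig(hidden_size=384, num_attention_heads=6)
assert config.model_type == "vit_msn"
assert config.hidden_size == 384 and config.patch_size == 16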
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """
    Project Euler 174: count tile totals t <= t_limit that can form between
    1 and n_limit distinct hollow square laminae.
    """
    count = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole width must have the same parity as the outer width
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__magic_name__ : List[str] = {
"""<""": operator.lt,
"""<=""": operator.le,
"""==""": operator.eq,
"""!=""": operator.ne,
""">=""": operator.ge,
""">""": operator.gt,
}
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if got_ver is None or want_ver is None:
raise ValueError(
f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
f""" reinstalling {pkg}.""" )
if not ops[op](version.parse(SCREAMING_SNAKE_CASE ) , version.parse(SCREAMING_SNAKE_CASE ) ):
raise ImportError(
f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ):
UpperCamelCase : str = f"""\n{hint}""" if hint is not None else """"""
# non-versioned check
if re.match(r"""^[\w_\-\d]+$""" , SCREAMING_SNAKE_CASE ):
UpperCamelCase , UpperCamelCase , UpperCamelCase : Tuple = requirement, None, None
else:
UpperCamelCase : str = re.findall(r"""^([^!=<>\s]+)([\s!=<>]{1,2}.+)""" , SCREAMING_SNAKE_CASE )
if not match:
raise ValueError(
"""requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"""
f""" got {requirement}""" )
UpperCamelCase , UpperCamelCase : Dict = match[0]
UpperCamelCase : List[str] = want_full.split(""",""" ) # there could be multiple requirements
UpperCamelCase : List[Any] = {}
for w in want_range:
UpperCamelCase : Dict = re.findall(r"""^([\s!=<>]{1,2})(.+)""" , SCREAMING_SNAKE_CASE )
if not match:
raise ValueError(
"""requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"""
f""" but got {requirement}""" )
UpperCamelCase , UpperCamelCase : List[Any] = match[0]
UpperCamelCase : Any = want_ver
if op not in ops:
raise ValueError(f"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" )
# special case
if pkg == "python":
UpperCamelCase : Optional[Any] = """.""".join([str(SCREAMING_SNAKE_CASE ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return
# check if any version is installed
try:
UpperCamelCase : Tuple = importlib.metadata.version(SCREAMING_SNAKE_CASE )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f"""The '{requirement}' distribution was not found and is required by this application. {hint}""" )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
UpperCamelCase : Any = """Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"""
return require_version(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
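Typical calls to the checker above (requirement strings are illustrative; each call raises on mismatch and returns None on success):

require_version("numpy>=1.17")     # versioned check against the installed distribution
require_version("python>=3.8")     # special-cased check against sys.version_info
require_version_core("packaging")  # bare presence check, with the core install hint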
from math import pi, sqrt, tan


def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    """Total surface area: the curved surface plus the flat circular base."""
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    """Heron's formula."""
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError("area_reg_polygon() only accepts non-negative values as length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)  # verbose so we can see methods missing tests

    print("[DEMO] Areas of various geometric shapes: \n")
    print(f"Rectangle: {area_rectangle(10, 20) = }")
    print(f"Square: {area_square(10) = }")
    print(f"Triangle: {area_triangle(10, 10) = }")
    print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
    print(f"Parallelogram: {area_parallelogram(10, 20) = }")
    print(f"Rhombus: {area_rhombus(10, 20) = }")
    print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
    print(f"Circle: {area_circle(20) = }")
    print(f"Ellipse: {area_ellipse(10, 20) = }")
    print("\nSurface Areas of various geometric shapes: \n")
    print(f"Cube: {surface_area_cube(20) = }")
    print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
    print(f"Sphere: {surface_area_sphere(20) = }")
    print(f"Hemisphere: {surface_area_hemisphere(20) = }")
    print(f"Cone: {surface_area_cone(10, 20) = }")
    print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
    print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
    print(f"Torus: {surface_area_torus(20, 10) = }")
    print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
    print(f"Square: {area_reg_polygon(4, 10) = }")
    print(f"Regular Pentagon: {area_reg_polygon(5, 10) = }")
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,unittest.TestCase ):
A__ : List[Any] = PhobertTokenizer
A__ : Optional[Any] = False
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_snake_case = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@''']
_snake_case = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
_snake_case = ['''#version: 0.2''', '''l à</w>''']
_snake_case = {'''unk_token''': '''<unk>'''}
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(f"""{token} {vocab_tokens[token]}\n""" )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowerCamelCase ) )
def __UpperCAmelCase ( self : Optional[Any] , **__lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] ):
"""simple docstring"""
_snake_case = '''Tôi là VinAI Research'''
_snake_case = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'''
return input_text, output_text
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_snake_case = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_snake_case = '''Tôi là VinAI Research'''
_snake_case = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split()
_snake_case = tokenizer.tokenize(__lowerCamelCase )
print(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
_snake_case = tokens + [tokenizer.unk_token]
_snake_case = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
import numpy as np


def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implements the hyperbolic tangent (tanh) activation function."""
    return (2 / (1 + np.exp(-2 * vector))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
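A quick numerical check of the activation above:

# tanh maps 0 to 0 and saturates toward -1/+1 for large-magnitude inputs.
print(tangent_hyperbolic(np.array([-10.0, 0.0, 10.0])))  # ~[-1.  0.  1.]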
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def _lowerCamelCase ( UpperCAmelCase_ : List[str], UpperCAmelCase_ : Optional[Any], UpperCAmelCase_ : List[Any], UpperCAmelCase_ : Union[str, Any], UpperCAmelCase_ : Optional[int] ) -> Dict:
"""simple docstring"""
for attribute in key.split("." ):
A__ = getattr(UpperCAmelCase_, UpperCAmelCase_ )
if weight_type is not None:
A__ = getattr(UpperCAmelCase_, UpperCAmelCase_ ).shape
else:
A__ = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
A__ = value
elif weight_type == "weight_g":
A__ = value
elif weight_type == "weight_v":
A__ = value
elif weight_type == "bias":
A__ = value
else:
A__ = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def _lowerCamelCase ( UpperCAmelCase_ : List[Any], UpperCAmelCase_ : int, UpperCAmelCase_ : Union[str, Any] ) -> str:
"""simple docstring"""
A__ = []
A__ = fairseq_model.state_dict()
A__ = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
A__ = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, hf_model.config.feat_extract_norm == "group", )
A__ = True
else:
for key, mapped_key in MAPPING.items():
A__ = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
if key in name or (key.split("w2v_model." )[-1] == name.split("." )[0] and not is_finetuned):
A__ = True
if "*" in mapped_key:
A__ = name.split(UpperCAmelCase_ )[0].split("." )[-2]
A__ = mapped_key.replace("*", UpperCAmelCase_ )
if "weight_g" in name:
A__ = "weight_g"
elif "weight_v" in name:
A__ = "weight_v"
elif "weight" in name:
A__ = "weight"
elif "bias" in name:
A__ = "bias"
else:
A__ = None
set_recursively(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ )
continue
if not is_used:
unused_weights.append(UpperCAmelCase_ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def _lowerCamelCase ( UpperCAmelCase_ : Dict, UpperCAmelCase_ : List[str], UpperCAmelCase_ : Optional[Any], UpperCAmelCase_ : Union[str, Any], UpperCAmelCase_ : Tuple ) -> Tuple:
"""simple docstring"""
A__ = full_name.split("conv_layers." )[-1]
A__ = name.split("." )
A__ = int(items[0] )
A__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
A__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
A__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
A__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
A__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCAmelCase_ )
@torch.no_grad()
def _lowerCamelCase ( UpperCAmelCase_ : str, UpperCAmelCase_ : Optional[int], UpperCAmelCase_ : Any=None, UpperCAmelCase_ : List[str]=None, UpperCAmelCase_ : Tuple=True ) -> Optional[int]:
"""simple docstring"""
if config_path is not None:
A__ = HubertConfig.from_pretrained(UpperCAmelCase_ )
else:
A__ = HubertConfig()
if is_finetuned:
if dict_path:
A__ = Dictionary.load(UpperCAmelCase_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
A__ = target_dict.pad_index
A__ = target_dict.bos_index
A__ = target_dict.eos_index
A__ = len(target_dict.symbols )
A__ = os.path.join(UpperCAmelCase_, "vocab.json" )
if not os.path.isdir(UpperCAmelCase_ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(UpperCAmelCase_ ) )
return
os.makedirs(UpperCAmelCase_, exist_ok=UpperCAmelCase_ )
with open(UpperCAmelCase_, "w", encoding="utf-8" ) as vocab_handle:
json.dump(target_dict.indices, UpperCAmelCase_ )
A__ = WavaVecaCTCTokenizer(
UpperCAmelCase_, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=UpperCAmelCase_, )
A__ = True if config.feat_extract_norm == "layer" else False
A__ = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=UpperCAmelCase_, return_attention_mask=UpperCAmelCase_, )
A__ = WavaVecaProcessor(feature_extractor=UpperCAmelCase_, tokenizer=UpperCAmelCase_ )
processor.save_pretrained(UpperCAmelCase_ )
A__ = HubertForCTC(UpperCAmelCase_ )
else:
A__ = HubertModel(UpperCAmelCase_ )
if is_finetuned:
A__ , A__ , A__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
A__ , A__ , A__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
A__ = model[0].eval()
recursively_load_weights(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ )
hf_wavavec.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
UpperCamelCase = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
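For reference, a hypothetical programmatic invocation of the converter above (all paths are placeholders; fairseq and a compatible checkpoint are required):

# Hypothetical call; mirrors what the argparse entry point does for a fine-tuned model.
convert_hubert_checkpoint(
    checkpoint_path="/path/to/hubert_ctc_checkpoint.pt",
    pytorch_dump_folder_path="/path/to/output_dir",
    config_path=None,
    dict_path="/path/to/dict.ltr.txt",
    is_finetuned=True,
)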
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
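How this plays out for a downstream import (a sketch; it requires sentencepiece and assumes the standard transformers package layout):

# Importing the package module is cheap; tokenization_mluke is only loaded when
# the attribute is resolved through the _LazyModule placed in sys.modules above.
from transformers.models.mluke import MLukeTokenizer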
import base64
import io
import json
import os
from copy import deepcopy

from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler


class HfDeepSpeedConfig:
    """
    Wraps a DeepSpeed configuration dictionary so it can be quickly queried,
    e.g. for the ZeRO stage or offload settings.
    """

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )

        self.config = config

        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload


class DeepSpeedEngineWrapper:
    """Internal wrapper for deepspeed.runtime.engine.DeepSpeedEngine, used to follow a conventional training loop."""

    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def is_overflow(self):
        """Whether or not the optimizer step was skipped because of gradient overflow."""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    """Placeholder optimizer used when the real optimizer comes from the DeepSpeed config."""

    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    """Placeholder scheduler used when the real scheduler comes from the DeepSpeed config."""

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
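A small illustration of the config wrapper above, using a made-up minimal ZeRO-3 dictionary:

ds_config = HfDeepSpeedConfig({"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}})
assert ds_config.is_zero3()
assert ds_config.is_offload()  # "cpu" is one of the recognized offload devices
assert ds_config.get_value("zero_optimization.stage") == 3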
import json
import os
from pathlib import Path

import pytest

from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename


URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()


@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}


@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content


def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
import unittest

from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101_122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101_122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3_018, 70_307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[0, 490, 14_328, 4_507, 354, 47, 43_669, 95, 25, 78_117, 20_215, 19_779, 190, 22, 400, 4, 35_343, 80_310, 603, 86, 24_937, 105, 33_438, 94_762, 196, 39_642, 7, 15, 15_933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10_534, 87, 25, 66, 3_358, 196, 55_289, 8, 82_961, 81, 2_204, 75_203, 7, 15, 763, 12_956, 216, 178, 14_328, 9_595, 1_377, 69_693, 7, 448, 71_021, 196, 18_106, 1_437, 13_974, 108, 9_083, 4, 49_315, 7, 39, 86, 1_326, 2_793, 46_333, 4, 448, 196, 74_588, 7, 49_315, 7, 39, 21, 822, 38_470, 74, 21, 66_723, 62_480, 8, 22_050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    """Check if a number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Return, in lowest terms, the sum of the three fractions x, y and z."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
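The memoized recursion above reproduces the classic worked example: a 1-day pass for day 1, a 7-day pass covering days 4-8, and a 1-day pass for day 20.

print(mincost_tickets(days=[1, 4, 6, 7, 8, 20], costs=[2, 7, 15]))  # 11 = 2 + 7 + 2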
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes up to and including num."""
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start to be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of all proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
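A quick check using a perfect number, whose proper divisors sum back to itself:

print(sum_of_divisors(28))  # 28, since 1 + 2 + 4 + 7 + 14 == 28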
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """
    Evaluate a fully parenthesized arithmetic expression
    using Dijkstra's two-stack algorithm.
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            right_operand = operand_stack.peek()
            operand_stack.pop()
            left_operand = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](left_operand, right_operand)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
from typing import Optional, Union

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin


class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of CLIP image embeddings
    and provides scale/unscale operations on them.
    """

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
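A round-trip sketch for the normalizer above (random embeddings; at initialization mean is 0 and std is 1, so scale followed by unscale is the identity):

normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
embeds = torch.randn(4, 768)
assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-6)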
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class a ( nn.Module ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=0.0 , UpperCamelCase_ = None , UpperCamelCase_ = "geglu" , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = True , UpperCamelCase_ = "layer_norm" , UpperCamelCase_ = False , ):
super().__init__()
UpperCAmelCase__ : int = only_cross_attention
UpperCAmelCase__ : List[str] = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
UpperCAmelCase__ : Union[str, Any] = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
F''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
UpperCAmelCase__ : Any = AdaLayerNorm(UpperCamelCase_ , UpperCamelCase_ )
elif self.use_ada_layer_norm_zero:
UpperCAmelCase__ : Optional[Any] = AdaLayerNormZero(UpperCamelCase_ , UpperCamelCase_ )
else:
UpperCAmelCase__ : List[str] = nn.LayerNorm(UpperCamelCase_ , elementwise_affine=UpperCamelCase_ )
UpperCAmelCase__ : Any = Attention(
query_dim=UpperCamelCase_ , heads=UpperCamelCase_ , dim_head=UpperCamelCase_ , dropout=UpperCamelCase_ , bias=UpperCamelCase_ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=UpperCamelCase_ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
UpperCAmelCase__ : Tuple = (
AdaLayerNorm(UpperCamelCase_ , UpperCamelCase_ )
if self.use_ada_layer_norm
else nn.LayerNorm(UpperCamelCase_ , elementwise_affine=UpperCamelCase_ )
)
UpperCAmelCase__ : Any = Attention(
query_dim=UpperCamelCase_ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=UpperCamelCase_ , dim_head=UpperCamelCase_ , dropout=UpperCamelCase_ , bias=UpperCamelCase_ , upcast_attention=UpperCamelCase_ , ) # is self-attn if encoder_hidden_states is none
else:
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : List[str] = None
# 3. Feed-forward
UpperCAmelCase__ : List[str] = nn.LayerNorm(UpperCamelCase_ , elementwise_affine=UpperCamelCase_ )
UpperCAmelCase__ : Any = FeedForward(UpperCamelCase_ , dropout=UpperCamelCase_ , activation_fn=UpperCamelCase_ , final_dropout=UpperCamelCase_ )
# let chunk size default to None
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : int = 0
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
# Sets chunk feed-forward
UpperCAmelCase__ : Optional[Any] = chunk_size
UpperCAmelCase__ : Dict = dim
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
UpperCAmelCase__ : int = self.norma(UpperCamelCase_ , UpperCamelCase_ )
elif self.use_ada_layer_norm_zero:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = self.norma(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , hidden_dtype=hidden_states.dtype )
else:
UpperCAmelCase__ : Optional[Any] = self.norma(UpperCamelCase_ )
UpperCAmelCase__ : Tuple = cross_attention_kwargs if cross_attention_kwargs is not None else {}
UpperCAmelCase__ : int = self.attna(
UpperCamelCase_ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
if self.use_ada_layer_norm_zero:
UpperCAmelCase__ : List[str] = gate_msa.unsqueeze(1 ) * attn_output
UpperCAmelCase__ : Optional[int] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
UpperCAmelCase__ : Union[str, Any] = (
self.norma(UpperCamelCase_ , UpperCamelCase_ ) if self.use_ada_layer_norm else self.norma(UpperCamelCase_ )
)
UpperCAmelCase__ : int = self.attna(
UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCAmelCase__ : str = attn_output + hidden_states
# 3. Feed-forward
UpperCAmelCase__ : Optional[int] = self.norma(UpperCamelCase_ )
if self.use_ada_layer_norm_zero:
UpperCAmelCase__ : str = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
UpperCAmelCase__ : List[str] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
UpperCAmelCase__ : int = torch.cat(
[self.ff(UpperCamelCase_ ) for hid_slice in norm_hidden_states.chunk(UpperCamelCase_ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
UpperCAmelCase__ : Dict = self.ff(UpperCamelCase_ )
if self.use_ada_layer_norm_zero:
UpperCAmelCase__ : Tuple = gate_mlp.unsqueeze(1 ) * ff_output
UpperCAmelCase__ : List[Any] = ff_output + hidden_states
return hidden_states
class a ( nn.Module ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = 4 , UpperCamelCase_ = 0.0 , UpperCamelCase_ = "geglu" , UpperCamelCase_ = False , ):
super().__init__()
UpperCAmelCase__ : List[Any] = int(dim * mult )
UpperCAmelCase__ : int = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
UpperCAmelCase__ : List[Any] = GELU(UpperCamelCase_ , UpperCamelCase_ )
if activation_fn == "gelu-approximate":
UpperCAmelCase__ : List[str] = GELU(UpperCamelCase_ , UpperCamelCase_ , approximate='tanh' )
elif activation_fn == "geglu":
UpperCAmelCase__ : Dict = GEGLU(UpperCamelCase_ , UpperCamelCase_ )
elif activation_fn == "geglu-approximate":
UpperCAmelCase__ : List[Any] = ApproximateGELU(UpperCamelCase_ , UpperCamelCase_ )
UpperCAmelCase__ : Any = nn.ModuleList([] )
# project in
self.net.append(UpperCamelCase_ )
# project dropout
self.net.append(nn.Dropout(UpperCamelCase_ ) )
# project out
self.net.append(nn.Linear(UpperCamelCase_ , UpperCamelCase_ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(UpperCamelCase_ ) )
def __snake_case ( self , UpperCamelCase_ ):
for module in self.net:
UpperCAmelCase__ : Tuple = module(UpperCamelCase_ )
return hidden_states
class a ( nn.Module ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = "none" ):
super().__init__()
UpperCAmelCase__ : str = nn.Linear(UpperCamelCase_ , UpperCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = approximate
def __snake_case ( self , UpperCamelCase_ ):
if gate.device.type != "mps":
return F.gelu(UpperCamelCase_ , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def __snake_case ( self , UpperCamelCase_ ):
UpperCAmelCase__ : int = self.proj(UpperCamelCase_ )
UpperCAmelCase__ : str = self.gelu(UpperCamelCase_ )
return hidden_states
class a ( nn.Module ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_ ):
super().__init__()
UpperCAmelCase__ : Tuple = nn.Linear(UpperCamelCase_ , dim_out * 2 )
def __snake_case ( self , UpperCamelCase_ ):
if gate.device.type != "mps":
return F.gelu(UpperCamelCase_ )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def __snake_case ( self , UpperCamelCase_ ):
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.proj(UpperCamelCase_ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(UpperCamelCase_ )
class a ( nn.Module ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_ ):
super().__init__()
UpperCAmelCase__ : Dict = nn.Linear(UpperCamelCase_ , UpperCamelCase_ )
def __snake_case ( self , UpperCamelCase_ ):
UpperCAmelCase__ : List[str] = self.proj(UpperCamelCase_ )
return x * torch.sigmoid(1.702 * x )
class a ( nn.Module ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_ ):
super().__init__()
UpperCAmelCase__ : str = nn.Embedding(UpperCamelCase_ , UpperCamelCase_ )
UpperCAmelCase__ : Optional[int] = nn.SiLU()
UpperCAmelCase__ : List[Any] = nn.Linear(UpperCamelCase_ , embedding_dim * 2 )
UpperCAmelCase__ : List[Any] = nn.LayerNorm(UpperCamelCase_ , elementwise_affine=UpperCamelCase_ )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
UpperCAmelCase__ : Optional[int] = self.linear(self.silu(self.emb(UpperCamelCase_ ) ) )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = torch.chunk(UpperCamelCase_ , 2 )
UpperCAmelCase__ : Tuple = self.norm(UpperCamelCase_ ) * (1 + scale) + shift
return x
class a ( nn.Module ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_ ):
super().__init__()
UpperCAmelCase__ : int = CombinedTimestepLabelEmbeddings(UpperCamelCase_ , UpperCamelCase_ )
UpperCAmelCase__ : Tuple = nn.SiLU()
UpperCAmelCase__ : Dict = nn.Linear(UpperCamelCase_ , 6 * embedding_dim , bias=UpperCamelCase_ )
UpperCAmelCase__ : str = nn.LayerNorm(UpperCamelCase_ , elementwise_affine=UpperCamelCase_ , eps=1E-6 )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None ):
UpperCAmelCase__ : Optional[Any] = self.linear(self.silu(self.emb(UpperCamelCase_ , UpperCamelCase_ , hidden_dtype=UpperCamelCase_ ) ) )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = emb.chunk(6 , dim=1 )
UpperCAmelCase__ : Optional[Any] = self.norm(UpperCamelCase_ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class a ( nn.Module ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = 1E-5 ):
super().__init__()
UpperCAmelCase__ : Union[str, Any] = num_groups
UpperCAmelCase__ : int = eps
if act_fn is None:
UpperCAmelCase__ : List[str] = None
else:
UpperCAmelCase__ : Union[str, Any] = get_activation(UpperCamelCase_ )
UpperCAmelCase__ : Dict = nn.Linear(UpperCamelCase_ , out_dim * 2 )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
if self.act:
UpperCAmelCase__ : List[Any] = self.act(UpperCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = self.linear(UpperCamelCase_ )
UpperCAmelCase__ : str = emb[:, :, None, None]
UpperCAmelCase__ , UpperCAmelCase__ : int = emb.chunk(2 , dim=1 )
UpperCAmelCase__ : Optional[int] = F.group_norm(UpperCamelCase_ , self.num_groups , eps=self.eps )
UpperCAmelCase__ : Any = x * (1 + scale) + shift
return x
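# Hedged usage sketch (added for illustration; not part of the original module).
# The gated feed-forward blocks above share one pattern: project to a wider hidden
# size, gate half of the projection through a GELU, and project back down. The
# sizes below are illustrative assumptions; `torch`, `nn` and `F` are the imports
# this module already uses.
if __name__ == "__main__":

    class _MinimalGEGLU(nn.Module):
        """Illustrative re-implementation of the GEGLU gating used above."""

        def __init__(self, dim_in: int, dim_out: int):
            super().__init__()
            self.proj = nn.Linear(dim_in, dim_out * 2)

        def forward(self, x):
            hidden, gate = self.proj(x).chunk(2, dim=-1)  # split the 2x projection
            return hidden * F.gelu(gate)  # gate one half with GELU

    _x = torch.randn(2, 16, 64)  # (batch, tokens, dim) -- illustrative sizes
    print(_MinimalGEGLU(64, 256)(_x).shape)  # torch.Size([2, 16, 256])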
| 110 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
__A : str = random.Random()
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]:
"""simple docstring"""
if rng is None:
_A = global_rng
_A = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowerCamelCase( unittest.TestCase ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=400 , snake_case_=2000 , snake_case_=2048 , snake_case_=128 , snake_case_=1 , snake_case_=512 , snake_case_=30 , snake_case_=4_4100 , ):
_A = parent
_A = batch_size
_A = min_seq_length
_A = max_seq_length
_A = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A = spectrogram_length
_A = feature_size
_A = num_audio_channels
_A = hop_length
_A = chunk_length
_A = sampling_rate
def lowerCAmelCase__ ( self ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def lowerCAmelCase__ ( self , snake_case_=False , snake_case_=False ):
def _flatten(snake_case_ ):
return list(itertools.chain(*snake_case_ ) )
if equal_length:
_A = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A = [np.asarray(snake_case_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCamelCase( __snake_case , unittest.TestCase ):
'''simple docstring'''
__magic_name__ = TvltFeatureExtractor
def lowerCAmelCase__ ( self ):
_A = TvltFeatureExtractionTester(self )
def lowerCAmelCase__ ( self ):
_A = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(snake_case_ , 'spectrogram_length' ) )
self.assertTrue(hasattr(snake_case_ , 'feature_size' ) )
self.assertTrue(hasattr(snake_case_ , 'num_audio_channels' ) )
self.assertTrue(hasattr(snake_case_ , 'hop_length' ) )
self.assertTrue(hasattr(snake_case_ , 'chunk_length' ) )
self.assertTrue(hasattr(snake_case_ , 'sampling_rate' ) )
def lowerCAmelCase__ ( self ):
_A = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A = feat_extract_first.save_pretrained(snake_case_ )[0]
check_json_file_has_correct_format(snake_case_ )
_A = self.feature_extraction_class.from_pretrained(snake_case_ )
_A = feat_extract_first.to_dict()
_A = feat_extract_second.to_dict()
_A = dict_first.pop('mel_filters' )
_A = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(snake_case_ , snake_case_ ) )
self.assertEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A = os.path.join(snake_case_ , 'feat_extract.json' )
feat_extract_first.to_json_file(snake_case_ )
_A = self.feature_extraction_class.from_json_file(snake_case_ )
_A = feat_extract_first.to_dict()
_A = feat_extract_second.to_dict()
_A = dict_first.pop('mel_filters' )
_A = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(snake_case_ , snake_case_ ) )
self.assertEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self ):
# Initialize feature_extractor
_A = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_A = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A = [np.asarray(snake_case_ ) for speech_input in speech_inputs]
# Test not batched input
_A = feature_extractor(np_speech_inputs[0] , return_tensors='np' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_A = feature_extractor(snake_case_ , return_tensors='np' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_A = feature_extractor(
snake_case_ , return_tensors='np' , sampling_rate=4_4100 , mask_audio=snake_case_ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_A = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A = np.asarray(snake_case_ )
_A = feature_extractor(snake_case_ , return_tensors='np' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def lowerCAmelCase__ ( self , snake_case_ ):
_A = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
_A = ds.sort('id' ).select(range(snake_case_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def lowerCAmelCase__ ( self ):
_A = self._load_datasamples(1 )
_A = TvltFeatureExtractor()
_A = feature_extractor(snake_case_ , return_tensors='pt' ).audio_values
        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
_A = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , snake_case_ , atol=1E-4 ) )
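# Gloss (illustrative reading, not part of the original test): the integration
# check above expects audio_values of shape (batch, audio_channels, time_frames,
# feature_size) = (1, 1, 192, 128), i.e. a mono mel spectrogram of 192 frames by
# 128 feature bins for the single LibriSpeech sample.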
| 27 | 0 |
"""simple docstring"""
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
UpperCAmelCase__ = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
UpperCAmelCase__ = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
UpperCAmelCase__ = BeautifulSoup(res.text, 'html.parser')
UpperCAmelCase__ = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(F"https://google.com{link.get('href')}")
| 224 |
def check_bouncy(n: int) -> bool:
    """simple docstring"""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """simple docstring"""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
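# Quick sanity check (illustrative, not part of the original Project Euler file):
# a number is "bouncy" when its digits are neither monotonically increasing nor
# monotonically decreasing, e.g. 538 (5 > 3 < 8).
def _demo_check_bouncy() -> None:
    assert check_bouncy(538) is True  # digits go down then up
    assert check_bouncy(1234) is False  # increasing digits
    assert check_bouncy(4321) is False  # decreasing digits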
| 27 | 0 |
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
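# Worked example (illustrative values, not part of the original file): water at
# roughly 20 degrees C has density ~998 kg/m^3 and bulk modulus ~2.15e9 Pa, which
# gives a speed of sound of about 1467.8 m/s.
def _demo_speed_of_sound() -> None:
    speed = speed_of_sound_in_a_fluid(density=998.0, bulk_modulus=2.15e9)
    assert abs(speed - 1467.8) < 1.0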
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 25 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    """simple docstring"""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
| 27 | 0 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class A__ ( unittest.TestCase ):
def A ( self : Union[str, Any] ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
def A ( self : List[Any] ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_SCREAMING_SNAKE_CASE =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_SCREAMING_SNAKE_CASE ='xvjiarui/stable-diffusion-2-inpainting'
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =FlaxStableDiffusionInpaintPipeline.from_pretrained(snake_case_ , safety_checker=snake_case_ )
_SCREAMING_SNAKE_CASE ='Face of a yellow cat, high resolution, sitting on a park bench'
_SCREAMING_SNAKE_CASE =jax.random.PRNGKey(0 )
_SCREAMING_SNAKE_CASE =50
_SCREAMING_SNAKE_CASE =jax.device_count()
_SCREAMING_SNAKE_CASE =num_samples * [prompt]
_SCREAMING_SNAKE_CASE =num_samples * [init_image]
_SCREAMING_SNAKE_CASE =num_samples * [mask_image]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =pipeline.prepare_inputs(snake_case_ , snake_case_ , snake_case_ )
# shard inputs and rng
_SCREAMING_SNAKE_CASE =replicate(snake_case_ )
_SCREAMING_SNAKE_CASE =jax.random.split(snake_case_ , jax.device_count() )
_SCREAMING_SNAKE_CASE =shard(snake_case_ )
_SCREAMING_SNAKE_CASE =shard(snake_case_ )
_SCREAMING_SNAKE_CASE =shard(snake_case_ )
_SCREAMING_SNAKE_CASE =pipeline(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , jit=snake_case_ )
_SCREAMING_SNAKE_CASE =output.images.reshape(snake_case_ , 512 , 512 , 3 )
_SCREAMING_SNAKE_CASE =images[0, 253:256, 253:256, -1]
_SCREAMING_SNAKE_CASE =jnp.asarray(jax.device_get(image_slice.flatten() ) )
_SCREAMING_SNAKE_CASE =jnp.array(
[0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 0.4_13_74_75, 0.4_21_70_84] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
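# Gloss (illustrative, not part of the original test): prepare_inputs tokenizes the
# prompts and preprocesses the images, shard() adds a leading device axis, and
# jit=True runs the pipeline under pmap, so the raw output images carry a
# (num_devices, batch_per_device, 512, 512, 3) layout before the reshape above.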
| 405 |
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """simple docstring"""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    """simple docstring"""
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
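# Hedged example (any continuous function with a sign change over the bracket
# works): the cubic f above crosses zero near x = 2.0945515 -- Wallis's classic
# example -- and bisection narrows the bracket to within about 1e-7 of it.
def _demo_bisection() -> None:
    root = bisection(lambda x: x**3 - 2 * x - 5, 1, 1000)
    assert abs(root - 2.0945515) < 1e-4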
| 27 | 0 |
'''simple docstring'''


def compute_ap(l):  # noqa: E741
    '''simple docstring'''
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
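# Sanity note (not in the original script): for the graph above the cut vertices
# are 2, 3 and 5 -- vertex 2 joins the 0-1-2 triangle to everything else, 3 is the
# only path to 4, and 5 is the only link into the 6-7-8 loop. An illustrative
# brute-force cross-check deletes a vertex and tests connectivity of the rest:
def _is_articulation_bruteforce(graph: dict, v: int) -> bool:
    nodes = [u for u in graph if u != v]
    if not nodes:
        return False
    seen = {nodes[0]}
    stack = [nodes[0]]
    while stack:
        u = stack.pop()
        for w in graph[u]:
            if w != v and w not in seen:
                seen.add(w)
                stack.append(w)
    return len(seen) != len(nodes)  # True when removing v disconnects the graph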
| 18 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase:
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ):
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = scope
def lowerCAmelCase__ ( self ):
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = random_attention_mask([self.batch_size, self.seq_length] )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self ):
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = NystromformerModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_A = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )
_A = model(snake_case_ , token_type_ids=snake_case_ )
_A = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = NystromformerForMaskedLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_A = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = NystromformerForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_A = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = self.num_labels
_A = NystromformerForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_A = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = self.num_labels
_A = NystromformerForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_A = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = self.num_choices
_A = NystromformerForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase__ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCamelCase( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
__magic_name__ = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
__magic_name__ = (
{
'feature-extraction': NystromformerModel,
'fill-mask': NystromformerForMaskedLM,
'question-answering': NystromformerForQuestionAnswering,
'text-classification': NystromformerForSequenceClassification,
'token-classification': NystromformerForTokenClassification,
'zero-shot': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
def lowerCAmelCase__ ( self ):
_A = NystromformerModelTester(self )
_A = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def lowerCAmelCase__ ( self ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_A = type
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
@slow
def lowerCAmelCase__ ( self ):
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = NystromformerModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_torch
class lowerCamelCase( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase__ ( self ):
_A = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' )
_A = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
_A = model(snake_case_ )[0]
_A = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , snake_case_ )
_A = torch.tensor(
[[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case_ , atol=1E-4 ) )
@slow
def lowerCAmelCase__ ( self ):
_A = 'the [MASK] of Belgium is Brussels'
_A = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' )
_A = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' )
_A = tokenizer(snake_case_ , return_tensors='pt' )
with torch.no_grad():
_A = model(encoding.input_ids ).logits
_A = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(snake_case_ ) , 'capital' )
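# Gloss (illustrative, not part of the original test): token index 2 in the encoded
# sentence is the [MASK] position ([CLS] and "the" precede it), so the vocabulary
# argmax at that position is expected to decode to the fill-in word "capital".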
| 27 | 0 |
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
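# Illustrative behaviour (assumed example tree, not from the original script): for a
# path like "maths/fibonacci.py" the output would read
#
#   ## Maths
#   * [Fibonacci](maths/fibonacci.py)
#
# since md_prefix(0) opens a section header and each extra path component nests the
# bullet one level deeper.
def _demo_md_prefix() -> None:
    assert md_prefix(0) == "\n##"
    assert md_prefix(1) == "  *"
    assert md_prefix(2) == "    *"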
| 202 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
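# Usage note (illustrative): the _LazyModule indirection above means that, e.g.,
# `from transformers.models.blenderbot import BlenderbotModel` defers importing the
# heavy torch/tf/flax submodules until the symbol is actually resolved.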
| 27 | 0 |
'''simple docstring'''
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    '''simple docstring'''
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
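# Quick check (illustrative, not part of the original file): primes up to 25.
def _demo_prime_sieve() -> None:
    assert prime_sieve(25) == [2, 3, 5, 7, 11, 13, 17, 19, 23]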
| 451 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    """simple docstring"""
    require_version(deps[pkg], hint)
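# Hedged usage sketch (the package name below is an illustrative assumption):
#
#     dep_version_check("tokenizers", hint="pip install -U tokenizers")
#
# raises if the installed version falls outside the pin recorded in `deps`
# (the table imported from dependency_versions_table above).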
| 27 | 0 |
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def train_command_factory(args: Namespace):
    """
    Factory function used to instantiate training command from provided command line arguments.

    Returns: TrainCommand
    """
    return TrainCommand(args)
class __snake_case ( __snake_case ):
"""simple docstring"""
@staticmethod
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ :Tuple ):
_a = parser.add_parser("train" , help="CLI tool to train a model on a task." )
train_parser.add_argument(
"--train_data" , type=snake_case_ , required=snake_case_ , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , )
train_parser.add_argument(
"--column_label" , type=snake_case_ , default=0 , help="Column of the dataset csv file with example labels." )
train_parser.add_argument(
"--column_text" , type=snake_case_ , default=1 , help="Column of the dataset csv file with example texts." )
train_parser.add_argument(
"--column_id" , type=snake_case_ , default=2 , help="Column of the dataset csv file with example ids." )
train_parser.add_argument(
"--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers)." )
train_parser.add_argument("--validation_data" , type=snake_case_ , default="" , help="path to validation dataset." )
train_parser.add_argument(
"--validation_split" , type=snake_case_ , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." , )
train_parser.add_argument("--output" , type=snake_case_ , default="./" , help="path to saved the trained model." )
train_parser.add_argument(
"--task" , type=snake_case_ , default="text_classification" , help="Task to train the model on." )
train_parser.add_argument(
"--model" , type=snake_case_ , default="bert-base-uncased" , help="Model\'s name or path to stored model." )
train_parser.add_argument("--train_batch_size" , type=snake_case_ , default=32 , help="Batch size for training." )
train_parser.add_argument("--valid_batch_size" , type=snake_case_ , default=64 , help="Batch size for validation." )
train_parser.add_argument("--learning_rate" , type=snake_case_ , default=3E-5 , help="Learning rate." )
train_parser.add_argument("--adam_epsilon" , type=snake_case_ , default=1E-08 , help="Epsilon for Adam optimizer." )
        train_parser.set_defaults(func=train_command_factory)
def __init__( self :List[str] , UpperCamelCase__ :Union[str, Any] ):
_a = logging.get_logger("transformers-cli/training" )
_a = "tf" if is_tf_available() else "torch"
os.makedirs(args.output , exist_ok=snake_case_ )
_a = args.output
_a = args.column_label
_a = args.column_text
_a = args.column_id
self.logger.info(f'Loading {args.task} pipeline for {args.model}' )
if args.task == "text_classification":
_a = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f'Loading dataset from {args.train_data}' )
_a = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_a = None
if args.validation_data:
self.logger.info(f'Loading validation dataset from {args.validation_data}' )
_a = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_a = args.validation_split
_a = args.train_batch_size
_a = args.valid_batch_size
_a = args.learning_rate
_a = args.adam_epsilon
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def SCREAMING_SNAKE_CASE_ ( self :int ):
raise NotImplementedError
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
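# Illustrative invocation (paths and column indices are assumptions, not from the
# original file):
#
#     transformers-cli train \
#         --train_data ./train.csv --column_label 0 --column_text 1 \
#         --model bert-base-uncased --output ./trained_model
#
# argparse hands the parsed Namespace to train_command_factory, and
# TrainCommand.run() dispatches to run_tf() or run_torch() depending on the
# available backend.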
| 388 |
def nand_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    """simple docstring"""
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
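# Side note (illustrative, not part of the original file): NAND is functionally
# complete -- NOT is nand_gate(a, a), and AND is a NAND followed by that NOT.
def _demo_nand_completeness() -> None:
    for a in (0, 1):
        assert nand_gate(a, a) == int(not a)  # NOT via NAND
        for b in (0, 1):
            y = nand_gate(a, b)
            assert nand_gate(y, y) == int(a and b)  # AND via two NANDs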
| 27 | 0 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
_UpperCamelCase = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
_UpperCamelCase = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
_UpperCamelCase = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
_UpperCamelCase = {
"num_train_timesteps": 40,
"sigma_min": 0.0_02,
"sigma_max": 80.0,
}
_UpperCamelCase = {
"num_train_timesteps": 201,
"sigma_min": 0.0_02,
"sigma_max": 80.0,
}
_UpperCamelCase = {
"num_train_timesteps": 151,
"sigma_min": 0.0_02,
"sigma_max": 80.0,
}
def strabool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
def con_pt_to_diffuser(unet_path: str, unet_config):
    checkpoint = torch.load(unet_path, map_location="cpu")

    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
)
parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
_UpperCamelCase = parser.parse_args()
_UpperCamelCase = strabool(args.class_cond)
_UpperCamelCase = os.path.basename(args.unet_path)
print(F"Checkpoint: {ckpt_name}")
# Get U-Net config
if "imagenet64" in ckpt_name:
_UpperCamelCase = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_UpperCamelCase = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
_UpperCamelCase = TEST_UNET_CONFIG
else:
raise ValueError(F"Checkpoint type {ckpt_name} is not currently supported.")
if not args.class_cond:
_UpperCamelCase = None
_UpperCamelCase = con_pt_to_diffuser(args.unet_path, unet_config)
_UpperCamelCase = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
_UpperCamelCase = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
_UpperCamelCase = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_UpperCamelCase = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F"Checkpoint type {ckpt_name} is not currently supported.")
_UpperCamelCase = CMStochasticIterativeScheduler(**scheduler_config)
_UpperCamelCase = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
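# Illustrative command line (script and checkpoint file names are assumptions):
#
#     python convert_consistency_to_diffusers.py \
#         --unet_path ./cd_imagenet64_l2.pt --dump_path ./cd-imagenet64 --class_cond True
#
# The helpers above remap OpenAI-style "input_blocks / middle_block / output_blocks"
# keys onto diffusers' "down_blocks / mid_block / up_blocks" layout before saving
# the assembled ConsistencyModelPipeline.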
| 492 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class lowerCamelCase:
'''simple docstring'''
def __init__( self , snake_case_ , ):
_A = parent
_A = 13
_A = 7
_A = True
_A = True
_A = True
_A = 99
_A = 32
_A = 2
_A = 4
_A = 37
_A = 'gelu'
_A = 0.1
_A = 0.1
_A = 512
_A = 16
_A = 2
_A = 0.02
_A = 3
_A = 4
_A = None
def lowerCAmelCase__ ( self ):
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = random_attention_mask([self.batch_size, self.seq_length] )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self ):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = TFEsmModel(config=snake_case_ )
_A = {'input_ids': input_ids, 'attention_mask': input_mask}
_A = model(snake_case_ )
_A = [input_ids, input_mask]
_A = model(snake_case_ )
_A = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_A = True
_A = TFEsmModel(config=snake_case_ )
_A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
_A = model(snake_case_ )
_A = [input_ids, input_mask]
_A = model(snake_case_ , encoder_hidden_states=snake_case_ )
# Also check the case where encoder outputs are not passed
_A = model(snake_case_ , attention_mask=snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = TFEsmForMaskedLM(config=snake_case_ )
_A = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = self.num_labels
_A = TFEsmForTokenClassification(config=snake_case_ )
_A = {'input_ids': input_ids, 'attention_mask': input_mask}
_A = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase__ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class lowerCamelCase( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
__magic_name__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
__magic_name__ = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
def lowerCAmelCase__ ( self ):
_A = TFEsmModelTester(self )
_A = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def lowerCAmelCase__ ( self ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
@slow
def lowerCAmelCase__ ( self ):
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = TFEsmModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
    @unittest.skip('Protein models do not support embedding resizing.')
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip('Protein models do not support embedding resizing.')
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))
    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D')
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
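# Hedged usage sketch (added for illustration; not part of the test suite): loading the same
# public checkpoint outside unittest. Requires TensorFlow, network access, and the TF-ESM
# classes exercised above.
if __name__ == "__main__":
    model = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D')
    input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
    hidden_states = model(input_ids).last_hidden_state  # shape (1, 6, hidden_size)
    print(hidden_states.shape)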
| 27 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor
class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos)
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift)
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim, time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0])

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block)
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=layers_per_block, add_downsample=downsample_each_block)

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type, num_layers=layers_per_block, in_channels=prev_output_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_upsample=not is_final_block)
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type, num_groups_out=num_groups_out, embed_dim=block_out_channels[0], out_channels=out_channels, act_fn=act_fn, fc_dim=block_out_channels[-1] // 4)
    def forward(self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], return_dict: bool = True) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
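# Hedged usage sketch (added for illustration; assumes the public `diffusers.UNet1DModel`
# API, which this file mirrors): build a small 1D UNet and run one denoising forward pass.
if __name__ == "__main__":
    from diffusers import UNet1DModel

    model = UNet1DModel(in_channels=2, out_channels=2, sample_size=256)
    noisy_sample = torch.randn(1, 2, 256)               # (batch, channels, length)
    denoised = model(noisy_sample, timestep=10).sample  # same shape as the input
    print(denoised.shape)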
| 336 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    elif metric == "loss":
        exp = '{val_avg_loss:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            F"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            ' function.')

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=F"val_{metric}", mode='max', save_top_k=1, every_n_epochs=1, )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=F"val_{metric}", mode='min' if 'loss' in metric else 'max', patience=patience, verbose=True, )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {F"lr_group_{i}": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        logger.info(F"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / 'test_results.txt'
            generations_file = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / F"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / F"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, 'a+') as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = F"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = '\n'.join(metrics['preds'])
            generations_file.open('w+').write(content)
    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, 'test')

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
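# Hedged wiring sketch (added for illustration): how these helpers are typically attached
# to a trainer. `output_dir`, the metric, and the patience value are arbitrary placeholders.
if __name__ == "__main__":
    callbacks = [
        Seq2SeqLoggingCallback(),
        get_checkpoint_callback(output_dir='outputs', metric='bleu'),
        get_early_stopping_callback(metric='bleu', patience=3),
    ]
    trainer = pl.Trainer(callbacks=callbacks, max_epochs=1)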
| 27 | 0 |
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('surface_area_cube() only accepts non-negative values')
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError('surface_area_cuboid() only accepts non-negative values')
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError('surface_area_sphere() only accepts non-negative values')
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError('surface_area_hemisphere() only accepts non-negative values')
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cone() only accepts non-negative values')
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            'surface_area_conical_frustum() only accepts non-negative values')
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cylinder() only accepts non-negative values')
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('surface_area_torus() only accepts non-negative values')
    if torus_radius < tube_radius:
        raise ValueError(
            'surface_area_torus() does not support spindle or self intersecting tori')
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError('area_rectangle() only accepts non-negative values')
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('area_square() only accepts non-negative values')
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('area_triangle() only accepts non-negative values')
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values')
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError('Given three sides do not form a triangle')
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3))
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('area_parallelogram() only accepts non-negative values')
    return base * height


def area_trapezium(base1: float, base2: float, height: float) -> float:
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values')
    return 1 / 2 * (base1 + base2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError('area_circle() only accepts non-negative values')
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError('area_ellipse() only accepts non-negative values')
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values')
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or equal to three as number of sides')
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as length of a side')
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("""[DEMO] Areas of various geometric shapes: \n""")
print(F"Rectangle: {area_rectangle(10, 20) = }")
print(F"Square: {area_square(10) = }")
print(F"Triangle: {area_triangle(10, 10) = }")
print(F"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(F"Parallelogram: {area_parallelogram(10, 20) = }")
print(F"Rhombus: {area_rhombus(10, 20) = }")
print(F"Trapezium: {area_trapezium(10, 20, 30) = }")
print(F"Circle: {area_circle(20) = }")
print(F"Ellipse: {area_ellipse(10, 20) = }")
print("""\nSurface Areas of various geometric shapes: \n""")
print(F"Cube: {surface_area_cube(20) = }")
print(F"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(F"Sphere: {surface_area_sphere(20) = }")
print(F"Hemisphere: {surface_area_hemisphere(20) = }")
print(F"Cone: {surface_area_cone(10, 20) = }")
print(F"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(F"Cylinder: {surface_area_cylinder(10, 20) = }")
print(F"Torus: {surface_area_torus(20, 10) = }")
print(F"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(F"Square: {area_reg_polygon(4, 10) = }")
print(F"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 387 |
def _print_dist(dist, v) -> None:
    print('\nThe shortest path matrix using Floyd Warshall algorithm\n')
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float('inf'):
                print(int(dist[i][j]), end='\t')
            else:
                print('INF', end='\t')
        print()


def floyd_warshall(graph, v):
    """
    All-pairs shortest paths: dist[i][j] holds the length of the shortest path
    from i to j, relaxed through every intermediate vertex k.
    """
    dist = [[float('inf') for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float('inf')
                    and dist[k][j] != float('inf')
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
__A : Dict = int(input("Enter number of vertices: "))
__A : Union[str, Any] = int(input("Enter number of edges: "))
__A : List[str] = [[float("inf") for i in range(v)] for j in range(v)]
for i in range(v):
__A : List[Any] = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print("\nEdge ", i + 1)
__A : Union[str, Any] = int(input("Enter source:"))
__A : List[str] = int(input("Enter destination:"))
__A : Union[str, Any] = float(input("Enter weight:"))
__A : Any = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
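# Non-interactive sketch (added for illustration, shown commented out so the interactive
# prompts above remain the only entry point): the same example as the transcript.
#
#   INF = float("inf")
#   example_graph = [[0.0, INF, INF], [INF, 0.0, 2.0], [INF, 1.0, 0.0]]
#   floyd_warshall(example_graph, 3)  # prints the 0/INF matrix shown above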
| 27 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler('''sample_euler''')

        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type='''np''')

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler('''sample_euler''')

        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type='''np''')

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1
    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler('''sample_dpmpp_2m''')

        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=15, output_type='''np''', use_karras_sigmas=True, )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
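# Hedged usage sketch (added for illustration; not part of the test class): the pipeline
# selects k-diffusion samplers by name, as exercised above. Running it needs the
# `k-diffusion` package, a CUDA GPU, and hub access.
#
#   pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
#   pipe = pipe.to('cuda')
#   pipe.set_scheduler('sample_dpmpp_2m')
#   image = pipe('A painting of a squirrel eating a burger', num_inference_steps=15).images[0]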
| 602 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
__A : Optional[int] = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('tpu-config', description=_description)
    else:
        parser = argparse.ArgumentParser('Accelerate tpu-config command', description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        'Config Arguments', 'Arguments that can be configured through `accelerate config`.')
    config_args.add_argument(
        '--config_file', type=str, default=None, help='Path to the config file to use for accelerate.', )
    config_args.add_argument(
        '--tpu_name', default=None, help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.', )
    config_args.add_argument(
        '--tpu_zone', default=None, help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.', )
    pod_args = parser.add_argument_group('TPU Arguments', 'Arguments for options ran inside the TPU.')
    pod_args.add_argument(
        '--use_alpha', action='store_true', help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.', )
    pod_args.add_argument(
        '--command_file', default=None, help='The path to the file containing the commands to run on the pod on startup.', )
    pod_args.add_argument(
        '--command', action='append', nargs='+', help='A command to run on the pod. Can be passed multiple times.', )
    pod_args.add_argument(
        '--install_accelerate', action='store_true', help='Whether to install accelerate on the pod. Defaults to False.', )
    pod_args.add_argument(
        '--accelerate_version', default='latest', help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.', )
    pod_args.add_argument(
        '--debug', action='store_true', help='If set, will print the command that would be run instead of running it.')

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = 'git+https://github.com/huggingface/accelerate.git'
    elif args.accelerate_version == "latest":
        args.accelerate_version = 'accelerate -U'
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = F"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError('You must specify either a command file or a command to run on the pod.')

    if args.command_file:
        with open(args.command_file, 'r') as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['cd /usr/share']
    if args.install_accelerate:
        new_cmd += [F"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = '; '.join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['gcloud']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(F"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print('Successfully setup pod.')
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
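# Hedged CLI sketch (added for illustration): this module backs `accelerate tpu-config`;
# the TPU name and zone below are placeholders. `--debug` prints the gcloud command
# instead of executing it.
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "echo hello" --install_accelerate --debug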
| 27 | 0 |
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    regex = R'''\w+[.]\d+'''
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, '''_'''.join(pat.split('''.''')))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""

    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
    if (
        any('''norm''' in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''embedding''',)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''weight''',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''bias''',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split('''.'''))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''')

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
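# Hedged illustration (added; the key below is made up, not from a real checkpoint):
# `rename_key` flattens indexed module paths so they match Flax naming.
if __name__ == "__main__":
    print(rename_key('down_blocks.0.attentions.1.proj'))  # -> down_blocks_0.attentions_1.proj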
| 224 |
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = 'nezha'

    def __init__(
        self,
        vocab_size=2_1128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
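# Hedged usage sketch (added for illustration; values are arbitrary): the standard
# PretrainedConfig round-trip with this config class.
if __name__ == "__main__":
    config = NezhaConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
    print(config.to_json_string())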
| 27 | 0 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function.")

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=3, every_n_epochs=1, )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True, )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 25 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(f"{solution() = }")
| 27 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = ["ConditionalDetrFeatureExtractor"]
lowerCamelCase : Optional[Any] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 405 |
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('surface_area_cube() only accepts non-negative values')
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError('surface_area_cuboid() only accepts non-negative values')
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError('surface_area_sphere() only accepts non-negative values')
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError('surface_area_hemisphere() only accepts non-negative values')
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cone() only accepts non-negative values')
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            'surface_area_conical_frustum() only accepts non-negative values')
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cylinder() only accepts non-negative values')
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('surface_area_torus() only accepts non-negative values')
    if torus_radius < tube_radius:
        raise ValueError(
            'surface_area_torus() does not support spindle or self intersecting tori')
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError('area_rectangle() only accepts non-negative values')
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('area_square() only accepts non-negative values')
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('area_triangle() only accepts non-negative values')
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values')
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError('Given three sides do not form a triangle')
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3))
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('area_parallelogram() only accepts non-negative values')
    return base * height


def area_trapezium(base1: float, base2: float, height: float) -> float:
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values')
    return 1 / 2 * (base1 + base2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError('area_circle() only accepts non-negative values')
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError('area_ellipse() only accepts non-negative values')
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values')
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or equal to three as number of sides')
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as length of a side')
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 27 | 0 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1E-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2, )
| 18 |
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implements the tanh activation function as 2 / (1 + e^(-2x)) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
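    # Added worked check (illustration): 2 / (1 + e^(-2x)) - 1 is algebraically tanh(x),
    # so the function should agree with numpy's built-in implementation.
    sample = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
    assert np.allclose(tangent_hyperbolic(sample), np.tanh(sample))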
| 27 | 0 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None


def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int]):
    import pyspark
def generate_fn():
__SCREAMING_SNAKE_CASE: Optional[Any] = df.select('''*''' , pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) )
for partition_id in partition_order:
__SCREAMING_SNAKE_CASE: Optional[int] = df_with_partition_id.select('''*''' ).where(F"""part_id = {partition_id}""" ).drop('''part_id''' )
__SCREAMING_SNAKE_CASE: Union[str, Any] = partition_df.collect()
__SCREAMING_SNAKE_CASE: List[str] = 0
for row in rows:
yield F"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)
def __iter__( self ):
"""simple docstring"""
yield from self.generate_examples_fn()
    def shuffle_data_sources(self, generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id, num_workers) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs, )
    def _validate_cache_dir(self):
        # Define this so that we don't reference self in create_cache_and_write_probe, which will result in a
        # pickling error due to pickling the SparkContext.
        cache_dir = self._cache_dir

        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(cache_dir, exist_ok=True)
            probe_file = os.path.join(cache_dir, '''fs_test''' + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, '''a''')
            return [probe_file]

        if self._spark.conf.get('''spark.master''', '''''').startswith('''local'''):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            '''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''')
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, '''batch_bytes: long''')
            .agg(pyspark.sql.functions.sum('''batch_bytes''').alias('''sample_bytes'''))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath, file_format, max_shard_size):
        import pyspark

        writer_class = ParquetWriter if file_format == '''parquet''' else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == '''parquet'''

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]], names=['''task_id''', '''num_examples''', '''num_bytes'''], )
            shard_id = 0
            writer = writer_class(
                features=features, path=working_fpath.replace('''SSSSS''', f"""{shard_id:05d}""").replace('''TTTTT''', f"""{task_id:05d}"""), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]], names=['''task_id''', '''num_examples''', '''num_bytes'''], )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features, path=working_fpath.replace('''SSSSS''', f"""{shard_id:05d}""").replace('''TTTTT''', f"""{task_id:05d}"""), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)

            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]], names=['''task_id''', '''num_examples''', '''num_bytes'''], )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, '''task_id: long, num_examples: long, num_bytes: long''')
            .groupBy('''task_id''')
            .agg(
                pyspark.sql.functions.sum('''num_examples''').alias('''total_num_examples'''), pyspark.sql.functions.sum('''num_bytes''').alias('''total_num_bytes'''), pyspark.sql.functions.count('''num_bytes''').alias('''num_shards'''), pyspark.sql.functions.collect_list('''num_examples''').alias('''shard_lengths'''), )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase = "arrow" , _lowerCAmelCase = None , _lowerCAmelCase = None , **_lowerCAmelCase , ):
"""simple docstring"""
self._validate_cache_dir()
__SCREAMING_SNAKE_CASE: int = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(snake_case_ )
__SCREAMING_SNAKE_CASE: Any = not is_remote_filesystem(self._fs )
__SCREAMING_SNAKE_CASE: int = os.path.join if is_local else posixpath.join
__SCREAMING_SNAKE_CASE: Any = '''-TTTTT-SSSSS-of-NNNNN'''
__SCREAMING_SNAKE_CASE: int = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
__SCREAMING_SNAKE_CASE: Dict = path_join(self._output_dir , snake_case_ )
__SCREAMING_SNAKE_CASE: str = 0
__SCREAMING_SNAKE_CASE: List[Any] = 0
__SCREAMING_SNAKE_CASE: List[Any] = 0
__SCREAMING_SNAKE_CASE: Tuple = []
__SCREAMING_SNAKE_CASE: Tuple = []
for task_id, content in self._prepare_split_single(snake_case_ , snake_case_ , snake_case_ ):
        (
            num_examples,
            num_bytes,
            num_shards,
            shard_lengths,
        ) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(snake_case_ )
    split_generator.split_info.num_examples = total_num_examples
    split_generator.split_info.num_bytes = total_num_bytes
# should rename everything at the end
logger.debug(f"""Renaming {total_shards} shards.""" )
if total_shards > 1:
        split_generator.split_info.shard_lengths = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
        fs = self._fs
# use the -SSSSS-of-NNNNN pattern
        def _rename_shard(
            task_id,
            shard_id,
            global_shard_id,
        ):
            rename(
                fs,
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
            )
        args = []
        global_shard_id = 0
        for i in range(len(task_id_and_num_shards)):
            task_id, num_shards = task_id_and_num_shards[i]
            for shard_id in range(num_shards):
                args.append([task_id, shard_id, global_shard_id])
                global_shard_id += 1
        self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
else:
# don't use any pattern
        shard_id = 0
        task_id = task_id_and_num_shards[0][0]
        self._rename(
            fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
            fpath.replace(SUFFIX, ""),
        )
def _get_examples_iterable_for_split(self, split_generator):
    """simple docstring"""
    return SparkExamplesIterable(self.df)
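# Illustrative sketch (not part of the builder above): how the
# -TTTTT-SSSSS-of-NNNNN placeholders in a shard path are filled in.
# The helper name `demo_shard_name` is hypothetical.
def demo_shard_name(fpath, task_id, shard_id, total_shards):
    return (
        fpath.replace("SSSSS", f"{shard_id:05d}")
        .replace("TTTTT", f"{task_id:05d}")
        .replace("NNNNN", f"{total_shards:05d}")
    )


# demo_shard_name("ds-TTTTT-SSSSS-of-NNNNN.arrow", 3, 1, 12)
# -> 'ds-00003-00001-of-00012.arrow'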
| 202 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
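# Sketch of the lazy-import idea used above (a hypothetical, minimal
# stand-in for _LazyModule, not its real implementation): the underlying
# import only runs when the attribute is first resolved.
import importlib


class _LazyAttr:
    def __init__(self, module_name, attr):
        self._module_name, self._attr = module_name, attr

    def resolve(self):
        # The import happens here, on first use, not at module load time.
        return getattr(importlib.import_module(self._module_name), self._attr)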
| 27 | 0 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    """simple docstring"""

    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_DECODE_TYPES = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")
        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        '''simple docstring'''
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)
        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")
        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])
        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
return out
    def _decode_helper(self, pred_logits, format):
        '''simple docstring'''
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(F"""Format {format} is not supported.""")
        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)
return dec_strs, conf_scores
    def char_decode(self, sequences):
        '''simple docstring'''
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs
    def bpe_decode(self, sequences):
        '''simple docstring'''
        return self.bpe_tokenizer.batch_decode(sequences)
    def wp_decode(self, sequences):
        '''simple docstring'''
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
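# Illustrative check of the confidence score used in _decode_helper above:
# per-step max probabilities are multiplied together (cumprod), so a single
# low-confidence step drags the whole sequence score down.
if is_torch_available():
    probs = torch.tensor([0.9, 0.8, 0.95])
    confidence = probs.cumprod(dim=0)[-1]  # tensor(0.6840)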
| 451 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__A : List[Any] = "http://www.mocksite.com/file1.txt"
__A : List[Any] = "\"text\": [\"foo\", \"foo\"]"
__A : Dict = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class MockResponse:
    '''simple docstring'''

    status_code = 200
    headers = {'Content-Length': '100'}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, 'utf-8')]
def mock_request(*args, **kwargs):
    """simple docstring"""
    return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def test_download_manager(urls_type, tmp_path, monkeypatch):
    """simple docstring"""
    import requests

    monkeypatch.setattr(requests, 'request', mock_request)
    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {'train': url}
    dataset_name = 'dummy'
    cache_subdir = 'downloads'
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir), use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix('.json')
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def test_download_manager_extract(paths_type, xz_file, text_file):
    """simple docstring"""
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {'train': filename}
    dataset_name = 'dummy'
    cache_dir = xz_file.parent
    extracted_subdir = 'extracted'
    download_config = DownloadConfig(
        cache_dir=cache_dir, use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    """simple docstring"""
    assert path.endswith('.jsonl')
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode('utf-8'))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def test_iter_archive_path(archive_jsonl, request):
    """simple docstring"""
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def test_iter_archive_file(archive_nested_jsonl, request):
    """simple docstring"""
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
    """simple docstring"""
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
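# Illustrative note (not part of the tests above): the cached filename is a
# deterministic hash of the URL (plus optional ETag), which is what the
# `parts[-1] == HASH` assertion relies on.
# hash_url_to_filename("http://www.mocksite.com/file1.txt") returns the same
# hex digest every time for the same URL.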
| 27 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)
        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip("Protein models do not support embedding resizing." )
def SCREAMING_SNAKE_CASE_ ( self :Any ):
pass
@unittest.skip("Protein models do not support embedding resizing." )
def SCREAMING_SNAKE_CASE_ ( self :Any ):
pass
def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(snake_case_ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
_a = model.get_bias()
assert isinstance(snake_case_ , snake_case_ )
for k, v in name.items():
assert isinstance(snake_case_ , tf.Variable )
else:
_a = model.get_output_embeddings()
assert x is None
_a = model.get_bias()
assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))
@slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
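# Quick illustration (not part of the tests) of the tolerance check used
# above: numpy.allclose(a, b, atol=t) passes when |a - b| <= atol + rtol*|b|
# element-wise, with rtol defaulting to 1e-5.
# numpy.allclose([1.0], [1.00009], atol=1e-4)  # True
# numpy.allclose([1.0], [1.002], atol=1e-4)    # False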
| 388 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """simple docstring"""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """simple docstring"""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    """simple docstring"""
    unique_s: set = set()
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]
    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
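# Worked check of add_three above (an illustrative assertion, not part of the
# original solution): 1/2 + 1/3 + 1/6 reduces to 1/1, so the tuple is (1, 1).
assert add_three(1, 2, 1, 3, 1, 6) == (1, 1)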
if __name__ == "__main__":
print(f"{solution() = }")
| 27 | 0 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session")
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset


@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
_UpperCamelCase = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='''session''' )
def _lowercase ( lowercase__ ):
__lowerCAmelCase : Dict = tmp_path_factory.mktemp('''data''' ) / '''file.txt'''
__lowerCAmelCase : Dict = FILE_CONTENT
with open(_SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return filename
@pytest.fixture(scope='''session''' )
def _lowercase ( lowercase__ ):
import bza
__lowerCAmelCase : Optional[Any] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.bz2'''
__lowerCAmelCase : List[str] = bytes(_SCREAMING_SNAKE_CASE , '''utf-8''' )
with bza.open(_SCREAMING_SNAKE_CASE , '''wb''' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def _lowercase ( lowercase__ ):
import gzip
__lowerCAmelCase : Any = str(tmp_path_factory.mktemp('''data''' ) / '''file.txt.gz''' )
__lowerCAmelCase : int = bytes(_SCREAMING_SNAKE_CASE , '''utf-8''' )
with gzip.open(_SCREAMING_SNAKE_CASE , '''wb''' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def _lowercase ( lowercase__ ):
if datasets.config.LZ4_AVAILABLE:
import lza.frame
__lowerCAmelCase : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.lz4'''
__lowerCAmelCase : Union[str, Any] = bytes(_SCREAMING_SNAKE_CASE , '''utf-8''' )
with lza.frame.open(_SCREAMING_SNAKE_CASE , '''wb''' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def _lowercase ( lowercase__ , lowercase__ ):
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
__lowerCAmelCase : Any = tmp_path_factory.mktemp('''data''' ) / '''file.txt.7z'''
with pyazr.SevenZipFile(_SCREAMING_SNAKE_CASE , '''w''' ) as archive:
archive.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='''session''' )
def _lowercase ( lowercase__ , lowercase__ ):
import tarfile
__lowerCAmelCase : str = tmp_path_factory.mktemp('''data''' ) / '''file.txt.tar'''
with tarfile.TarFile(_SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.add(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='''session''' )
def _lowercase ( lowercase__ ):
import lzma
__lowerCAmelCase : int = tmp_path_factory.mktemp('''data''' ) / '''file.txt.xz'''
__lowerCAmelCase : Optional[Any] = bytes(_SCREAMING_SNAKE_CASE , '''utf-8''' )
with lzma.open(_SCREAMING_SNAKE_CASE , '''wb''' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def _lowercase ( lowercase__ , lowercase__ ):
import zipfile
__lowerCAmelCase : Union[str, Any] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zip'''
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='''session''' )
def _lowercase ( lowercase__ ):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__lowerCAmelCase : Union[str, Any] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zst'''
__lowerCAmelCase : Optional[int] = bytes(_SCREAMING_SNAKE_CASE , '''utf-8''' )
with zstd.open(_SCREAMING_SNAKE_CASE , '''wb''' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
'''\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>''' )
    with open(filename, "w") as f:
        f.write(data)
    return filename
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]


@pytest.fixture(scope="session")
def dataset_dict():
    return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path


@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path


@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path


@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path


@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path


@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path


@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path
@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_file):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_file, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def abc_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path


@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path


@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path


@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")


@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path


@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    return data_dir
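# Illustrative use of the fixtures above from a test module (hypothetical
# test function; pytest injects each fixture by its function name):
#
# def test_zip_csv_contains_both_files(zip_csv_path):
#     import zipfile
#     with zipfile.ZipFile(zip_csv_path) as zf:
#         assert len(zf.namelist()) == 2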
| 492 |
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """simple docstring"""
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime
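# Quick sanity check of the sieve above (illustrative assertion only).
assert prime_sieve(10) == [2, 3, 5, 7]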
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| 27 | 0 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    return sum(compute_truncated_primes(11))
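# Illustrative check (not part of the original): truncations of 3797 from
# both ends, the classic truncatable prime.
# list_truncated_nums(3797) -> [3797, 797, 379, 97, 37, 7, 3]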
if __name__ == "__main__":
print(F'''{sum(compute_truncated_primes(11)) = }''')
| 336 |
__A : Dict = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """simple docstring"""
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
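# Worked trace (illustrative): for "(2 + (3 * 4))", the inner ')' reduces
# 3 * 4 to 12, then the outer ')' reduces 2 + 12 to 14.
# dijkstras_two_stack_algorithm("(2 + (3 * 4))") -> 14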
if __name__ == "__main__":
__A : Any = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 27 | 0 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
UpperCamelCase__ : List[Any] = "."
if __name__ == "__main__":
UpperCamelCase__ : Tuple = os.path.join(REPO_PATH, """utils/documentation_tests.txt""")
UpperCamelCase__ : Union[str, Any] = []
UpperCamelCase__ : Optional[int] = []
with open(doctest_file_path) as fp:
for line in fp:
UpperCamelCase__ : Dict = line.strip()
UpperCamelCase__ : int = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
UpperCamelCase__ : List[Any] = "\n".join(non_existent_paths)
raise ValueError(F"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
if all_paths != sorted(all_paths):
raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""")
| 387 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCamelCase( unittest.TestCase ):
'''simple docstring'''
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('DownBlock2D', 'AttnDownBlock2D'), up_block_types=('AttnUpBlock2D', 'UpBlock2D'), )
        return model
    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type='numpy').images
        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type='numpy', return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def test_karras_ve_pipeline(self):
        model_id = 'google/ncsnpp-celebahq-256'
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
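# Side note (illustrative): reseeding with torch.manual_seed(0) before each
# pipe(...) call makes both runs draw identical noise, which is what lets the
# dict-style and tuple-style outputs be compared element-wise above.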
| 27 | 0 |
"""simple docstring"""
from collections import defaultdict
class AssignmentUsingBitmask:
    """simple docstring"""

    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]
        self.task = defaultdict(list)  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1
    def count_ways_until(self, mask, task_no) -> int:
# if mask == self.finalmask all persons are distributed tasks, return 1
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
        self.dp[mask][task_no] = total_ways_util
return self.dp[mask][task_no]
    def count_no_of_ways(self, task_performed) -> int:
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
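# Illustrative reading of the bitmask (not part of the original): bit p of
# `mask` is set once person p has been assigned a task, so mask equal to
# final_mask means every person is busy.
# mask = 0b101  # persons 0 and 2 assigned, person 1 still free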
| 602 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as nested Python lists."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
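# Illustrative call (not part of the original tests): floats_list((2, 3))
# returns a 2-element list of 3 random floats each, handy for fake batches.
batch = floats_list((2, 3))
assert len(batch) == 2 and len(batch[0]) == 3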
class TvltFeatureExtractionTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
def lowerCAmelCase__ ( self ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
def _flatten(snake_case_ ):
return list(itertools.chain(*snake_case_ ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    '''simple docstring'''

    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
    def test_feat_extract_properties(self):
_A = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(snake_case_ , 'spectrogram_length' ) )
self.assertTrue(hasattr(snake_case_ , 'feature_size' ) )
self.assertTrue(hasattr(snake_case_ , 'num_audio_channels' ) )
self.assertTrue(hasattr(snake_case_ , 'hop_length' ) )
self.assertTrue(hasattr(snake_case_ , 'chunk_length' ) )
self.assertTrue(hasattr(snake_case_ , 'sampling_rate' ) )
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
            dict_first = feat_extract_first.to_dict()
            dict_second = feat_extract_second.to_dict()
            mel_first = dict_first.pop('mel_filters')
            mel_second = dict_second.pop('mel_filters')
            self.assertTrue(np.allclose(mel_first, mel_second))
            self.assertEqual(dict_first, dict_second)
def lowerCAmelCase__ ( self ):
_A = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A = os.path.join(snake_case_ , 'feat_extract.json' )
feat_extract_first.to_json_file(snake_case_ )
_A = self.feature_extraction_class.from_json_file(snake_case_ )
_A = feat_extract_first.to_dict()
_A = feat_extract_second.to_dict()
_A = dict_first.pop('mel_filters' )
_A = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(snake_case_ , snake_case_ ) )
self.assertEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self ):
# Initialize feature_extractor
_A = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_A = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A = [np.asarray(snake_case_ ) for speech_input in speech_inputs]
# Test not batched input
_A = feature_extractor(np_speech_inputs[0] , return_tensors='np' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_A = feature_extractor(snake_case_ , return_tensors='np' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_A = feature_extractor(
snake_case_ , return_tensors='np' , sampling_rate=4_4100 , mask_audio=snake_case_ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_A = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A = np.asarray(snake_case_ )
_A = feature_extractor(snake_case_ , return_tensors='np' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def lowerCAmelCase__ ( self , snake_case_ ):
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('id' ).select(range(snake_case_ ) )[:snake_case_]['audio']
return [x["array"] for x in speech_samples]
def lowerCAmelCase__ ( self ):
        input_speech = self._load_datasamples(1 )
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech , return_tensors='pt' ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , expected_slice , atol=1E-4 ) )
| 27 | 0 |
"""simple docstring"""
from __future__ import annotations
UpperCAmelCase__ = "#"
class lowerCAmelCase__ :
def __init__( self : Optional[int] ):
_snake_case = {}
def lowercase ( self : Dict , _lowerCamelCase : Optional[int] ):
_snake_case = self._trie
for char in text:
if char not in trie:
_snake_case = {}
_snake_case = trie[char]
_snake_case = True
def lowercase ( self : str , _lowerCamelCase : Optional[Any] ):
_snake_case = self._trie
for char in prefix:
if char in trie:
_snake_case = trie[char]
else:
return []
return self._elements(snake_case_ )
def lowercase ( self : str , _lowerCamelCase : Optional[Any] ):
_snake_case = []
for c, v in d.items():
_snake_case = [''' '''] if c == END else [(c + s) for s in self._elements(snake_case_ )]
result.extend(snake_case_ )
return tuple(snake_case_ )
UpperCAmelCase__ = Trie()
UpperCAmelCase__ = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def _UpperCAmelCase ( __lowerCamelCase : int ) -> tuple:
_snake_case = trie.find_word(_SCREAMING_SNAKE_CASE )
return tuple(string + word for word in suffixes )
def _UpperCAmelCase ( ) -> None:
print(autocomplete_using_trie('''de''' ) )
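# Sanity checks for the autocomplete above; note the trailing space that
# _elements appends for every terminal END marker.
assert autocomplete_using_trie("de") == ("depart ", "detergent ", "deer ", "deal ")
assert autocomplete_using_trie("dog") == ("dog ",)
assert autocomplete_using_trie("x") == ()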
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 224 |
def check_bouncy(n: int) -> bool:
    """simple docstring"""
    if not isinstance(n, int):
        raise ValueError('check_bouncy() accepts only integer arguments' )
    str_n = str(n)
    sorted_str_n = ''.join(sorted(str_n) )
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """simple docstring"""
    if not 0 < percent < 100:
        raise ValueError('solution() only accepts values from 0 to 100' )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
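# Hand-checkable cases (Project Euler 112: 538 is the first point at which
# exactly 50% of all numbers are bouncy):
assert check_bouncy(538) is True
assert check_bouncy(134468) is False  # digits never decrease, so not bouncy
assert solution(50) == 538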
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"{solution(99)}")
| 27 | 0 |
import datasets
from .evaluate import evaluate
a_ = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
a_ = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
a_ = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCamelCase ( datasets.Metric ):
'''simple docstring'''
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
def __UpperCamelCase ( self : Any , a : Dict , a : Dict ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
SCREAMING_SNAKE_CASE : Dict = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
SCREAMING_SNAKE_CASE : Any = evaluate(dataset=snake_case_ , predictions=snake_case_ )
return score | 25 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    """simple docstring"""
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"{price_plus_tax(100, 0.2_5) = }")
print(f"{price_plus_tax(1_2_5.5_0, 0.0_5) = }")
| 27 | 0 |
'''simple docstring'''
import os
import pytest
from attr import dataclass
lowerCamelCase : int = "us-east-1" # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
    hyperparameters = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_00,
'save_steps': 55_00,
}
    distributed_hyperparameters = {**hyperparameters, 'max_steps': 10_00}
@property
    def metric_definitions(self ):
'''simple docstring'''
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name(self ) -> str:
'''simple docstring'''
return f"{self.framework}-transfromers-test"
@property
    def test_path(self ) -> str:
'''simple docstring'''
return f"./tests/sagemaker/scripts/{self.framework}"
@property
    def image_uri(self ) -> str:
'''simple docstring'''
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def sm_env(request ):
    """simple docstring"""
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
| 405 |
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """simple docstring"""
    start = a
    end = b
    if function(a) == 0:  # one of a or b is already a root of the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if neither endpoint is a root and f(a) and f(b) have the same sign,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.' )
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # iterate until the interval shrinks below 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    """simple docstring"""
    return x**3 - 2 * x - 5
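# The same routine on another sign change, as a quick illustration:
# the positive root of g(x) = x**2 - 2 is sqrt(2) ≈ 1.41421356.
def g(x: float) -> float:
    return x * x - 2


assert abs(bisection(g, 1, 2) - 2**0.5) < 1e-6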
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
| 27 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self , image_processor=None , tokenizer=None , **kwargs ) -> List[Any]:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
def __call__( self , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = 0 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = True , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> Union[str, Any]:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
_lowerCAmelCase = self.tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_token_type_ids=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
if images is not None:
_lowerCAmelCase = self.image_processor(
snake_case_ , return_image_mask=snake_case_ , return_codebook_pixels=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
if text is not None and images is not None:
encoding.update(snake_case_ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case_ ) , tensor_type=snake_case_ )
def _snake_case ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def _snake_case ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def _snake_case ( self ) -> List[Any]:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _snake_case ( self ) -> Optional[int]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , snake_case_ , )
return self.image_processor_class
@property
def _snake_case ( self ) -> Any:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , snake_case_ , )
return self.image_processor
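# Hypothetical usage sketch of the processor above (checkpoint name and the
# exact output keys are illustrative; requires `transformers` and `Pillow`):
#
#     from PIL import Image
#     from transformers import FlavaProcessor
#
#     processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#     image = Image.new("RGB", (224, 224))
#     inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#     # `inputs` combines input_ids / attention_mask from the tokenizer with
#     # pixel_values from the image processor.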
| 18 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ):
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = scope
def lowerCAmelCase__ ( self ):
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = random_attention_mask([self.batch_size, self.seq_length] )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self ):
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = NystromformerModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_A = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )
_A = model(snake_case_ , token_type_ids=snake_case_ )
_A = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = NystromformerForMaskedLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_A = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = NystromformerForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_A = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = self.num_labels
_A = NystromformerForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_A = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = self.num_labels
_A = NystromformerForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_A = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = self.num_choices
_A = NystromformerForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase__ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': NystromformerModel,
'fill-mask': NystromformerForMaskedLM,
'question-answering': NystromformerForQuestionAnswering,
'text-classification': NystromformerForSequenceClassification,
'token-classification': NystromformerForTokenClassification,
'zero-shot': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_headmasking = False
def lowerCAmelCase__ ( self ):
_A = NystromformerModelTester(self )
_A = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def lowerCAmelCase__ ( self ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_A = type
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
@slow
def lowerCAmelCase__ ( self ):
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = NystromformerModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_torch
class lowerCamelCase( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase__ ( self ):
_A = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' )
_A = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
_A = model(snake_case_ )[0]
_A = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , snake_case_ )
_A = torch.tensor(
[[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case_ , atol=1E-4 ) )
@slow
def lowerCAmelCase__ ( self ):
_A = 'the [MASK] of Belgium is Brussels'
_A = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' )
_A = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' )
_A = tokenizer(snake_case_ , return_tensors='pt' )
with torch.no_grad():
_A = model(encoding.input_ids ).logits
_A = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(snake_case_ ) , 'capital' )
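# Hypothetical pipeline equivalent of the masked-LM integration test above
# (kept as a commented sketch since it downloads the checkpoint):
#
#     from transformers import pipeline
#
#     fill = pipeline("fill-mask", model="uw-madison/nystromformer-512")
#     print(fill("the [MASK] of Belgium is Brussels")[0]["token_str"])  # expected: "capital"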
| 27 | 0 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem( AbstractArchiveFileSystem ):
    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self , _lowerCAmelCase = "" , _lowerCAmelCase = None , _lowerCAmelCase = None , **_lowerCAmelCase ):
"""simple docstring"""
super().__init__(self , **snake_case_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
snake_case_ , mode='''rb''' , protocol=snake_case_ , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
        self.compressed_name = os.path.basename(self.file.path.split('''::''' )[0] )
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex('''.''' )]
            if '''.''' in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
@classmethod
    def _strip_protocol(cls , path ):
        """simple docstring"""
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path ).lstrip('''/''' )

    def _get_dirs(self ):
        """simple docstring"""
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
            self.dir_cache = {f['''name''']: f}

    def cat(self , path ):
        """simple docstring"""
        return self.file.open().read()

    def _open(self , path , mode = "rb" , block_size=None , autocommit=True , cache_options=None , **kwargs , ):
        """simple docstring"""
        path = self._strip_protocol(path )
        if mode != "rb":
            raise ValueError(f"""Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'""" )
        return self.file.open()
class Bz2FileSystem( BaseCompressedFileFileSystem ):
    protocol = '''bz2'''
    compression = '''bz2'''
    extension = '''.bz2'''
class GzipFileSystem( BaseCompressedFileFileSystem ):
    protocol = '''gzip'''
    compression = '''gzip'''
    extension = '''.gz'''
class Lz4FileSystem( BaseCompressedFileFileSystem ):
    protocol = '''lz4'''
    compression = '''lz4'''
    extension = '''.lz4'''
class XzFileSystem( BaseCompressedFileFileSystem ):
    protocol = '''xz'''
    compression = '''xz'''
    extension = '''.xz'''
class ZstdFileSystem( BaseCompressedFileFileSystem ):
    protocol = '''zstd'''
    compression = '''zstd'''
    extension = '''.zst'''
def __init__( self , _lowerCAmelCase , _lowerCAmelCase = "rb" , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = DEFAULT_BLOCK_SIZE , **_lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
fo=snake_case_ , mode=snake_case_ , target_protocol=snake_case_ , target_options=snake_case_ , block_size=snake_case_ , **snake_case_ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
def __init__( self , _lowerCAmelCase ):
"""simple docstring"""
                self._file = file_
def __enter__( self ):
"""simple docstring"""
self._file.__enter__()
return self
            def __exit__(self , *args , **kwargs ):
                """simple docstring"""
                self._file.__exit__(*args , **kwargs )
def __iter__( self ):
"""simple docstring"""
return iter(self._file )
            def __next__(self ):
                """simple docstring"""
                return next(self._file )
            def __getattr__(self , attr ):
                """simple docstring"""
                return getattr(self._file , attr )
        def fixed_enter(*args , **kwargs ):
            return WrappedFile(_enter(*args , **kwargs ) )

        self.file.__enter__ = fixed_enter
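        # A small illustration of the compression handling these classes enable,
        # kept as a commented sketch; it uses only fsspec's built-in `compression`
        # keyword rather than the chained `zstd://`-style protocols registered here.
        #
        #     import gzip, os, tempfile
        #
        #     tmp_path = os.path.join(tempfile.mkdtemp(), "file.txt.gz")
        #     with gzip.open(tmp_path, "wt") as fh:
        #         fh.write("hello compressed world")
        #     with fsspec.open(tmp_path, "rt", compression="gzip") as fh:
        #         assert fh.read() == "hello compressed world"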
| 202 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
__A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
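# Minimal standalone sketch of the lazy-import idea used above (simplified:
# the real _LazyModule also exposes plain module attributes, supports __dir__,
# and raises AttributeError for unknown names):
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value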
| 27 | 0 |
'''simple docstring'''
def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 , len(input_list) - 1 , 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1 , len(input_list) - 1 , 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
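# Quick checks of the brick sort above:
assert odd_even_sort([5, 1, 4, 2, 8]) == [1, 2, 4, 5, 8]
assert odd_even_sort([]) == []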
if __name__ == "__main__":
print("""Enter list to be sorted""")
__snake_case = [int(x) for x in input().split()]
# inputing elements of the list in one line
__snake_case = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
| 451 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__A : List[Any] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg , hint=None ):
    """simple docstring"""
    require_version(deps[pkg] , hint )
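# Illustration of what the check enforces, kept as a commented sketch
# ("python" is handled as a pseudo-package by transformers' version checker):
#
#     require_version("python>=3.7")  # passes on any supported interpreter
#     dep_version_check("tqdm")       # raises ImportError if tqdm is missing or too old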
| 27 | 0 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
"""simple docstring"""
def __init__( self :List[Any] , UpperCamelCase__ :Any , UpperCamelCase__ :Optional[int]=2 , UpperCamelCase__ :Tuple=8 , UpperCamelCase__ :int=True , UpperCamelCase__ :Dict=True , UpperCamelCase__ :Dict=True , UpperCamelCase__ :Optional[Any]=True , UpperCamelCase__ :str=99 , UpperCamelCase__ :Any=16 , UpperCamelCase__ :Any=5 , UpperCamelCase__ :List[Any]=2 , UpperCamelCase__ :int=36 , UpperCamelCase__ :List[Any]="gelu" , UpperCamelCase__ :List[str]=0.0 , UpperCamelCase__ :str=0.0 , UpperCamelCase__ :Any=512 , UpperCamelCase__ :int=16 , UpperCamelCase__ :Tuple=2 , UpperCamelCase__ :int=0.02 , UpperCamelCase__ :Dict=3 , UpperCamelCase__ :Union[str, Any]=4 , UpperCamelCase__ :Optional[int]=None , ):
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = scope
def SCREAMING_SNAKE_CASE_ ( self :Dict ):
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = ids_tensor([self.batch_size] , self.num_choices )
_a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE_ ( self :int ):
_a = self.get_config()
_a = 300
return config
def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE_ ( self :Dict , UpperCamelCase__ :List[str] , UpperCamelCase__ :int , UpperCamelCase__ :List[Any] , UpperCamelCase__ :Dict , UpperCamelCase__ :Any , UpperCamelCase__ :Dict , UpperCamelCase__ :int ):
_a = MraModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )
_a = model(snake_case_ , token_type_ids=snake_case_ )
_a = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self :str , UpperCamelCase__ :Dict , UpperCamelCase__ :Dict , UpperCamelCase__ :Optional[int] , UpperCamelCase__ :int , UpperCamelCase__ :int , UpperCamelCase__ :Union[str, Any] , UpperCamelCase__ :Dict , UpperCamelCase__ :Optional[Any] , UpperCamelCase__ :Union[str, Any] , ):
_a = True
_a = MraModel(snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , )
_a = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , encoder_hidden_states=snake_case_ , )
_a = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self :Tuple , UpperCamelCase__ :Optional[int] , UpperCamelCase__ :Tuple , UpperCamelCase__ :Dict , UpperCamelCase__ :List[Any] , UpperCamelCase__ :Optional[int] , UpperCamelCase__ :int , UpperCamelCase__ :str ):
_a = MraForMaskedLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_ ( self :int , UpperCamelCase__ :str , UpperCamelCase__ :int , UpperCamelCase__ :Tuple , UpperCamelCase__ :int , UpperCamelCase__ :List[Any] , UpperCamelCase__ :Optional[int] , UpperCamelCase__ :Tuple ):
_a = MraForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] , UpperCamelCase__ :Optional[int] , UpperCamelCase__ :List[Any] , UpperCamelCase__ :Union[str, Any] , UpperCamelCase__ :str , UpperCamelCase__ :Any , UpperCamelCase__ :str , UpperCamelCase__ :List[str] ):
_a = self.num_labels
_a = MraForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self :Tuple , UpperCamelCase__ :List[Any] , UpperCamelCase__ :Dict , UpperCamelCase__ :Optional[Any] , UpperCamelCase__ :Union[str, Any] , UpperCamelCase__ :Union[str, Any] , UpperCamelCase__ :List[Any] , UpperCamelCase__ :List[str] ):
_a = self.num_labels
_a = MraForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self :str , UpperCamelCase__ :Optional[Any] , UpperCamelCase__ :int , UpperCamelCase__ :List[str] , UpperCamelCase__ :Optional[int] , UpperCamelCase__ :Tuple , UpperCamelCase__ :Dict , UpperCamelCase__ :Dict ):
_a = self.num_choices
_a = MraForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest( ModelTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ : Union[str, Any] = False
lowerCAmelCase_ : Tuple = False
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : Any = False
lowerCAmelCase_ : Optional[int] = ()
def SCREAMING_SNAKE_CASE_ ( self :Dict ):
_a = MraModelTester(self )
_a = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self :int ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def SCREAMING_SNAKE_CASE_ ( self :Tuple ):
_a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_a = type
self.model_tester.create_and_check_model(*snake_case_ )
def SCREAMING_SNAKE_CASE_ ( self :List[str] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case_ )
def SCREAMING_SNAKE_CASE_ ( self :Tuple ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
def SCREAMING_SNAKE_CASE_ ( self :Tuple ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE_ ( self :Dict ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = MraModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@unittest.skip(reason="MRA does not output attentions" )
def SCREAMING_SNAKE_CASE_ ( self :List[str] ):
return
@require_torch
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ):
_a = MraModel.from_pretrained("uw-madison/mra-base-512-4" )
_a = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
_a = model(snake_case_ )[0]
_a = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , snake_case_ )
_a = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case_ , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self :Dict ):
_a = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" )
_a = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
_a = model(snake_case_ )[0]
_a = 50_265
_a = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , snake_case_ )
_a = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case_ , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
_a = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" )
_a = torch.arange(4_096 ).unsqueeze(0 )
with torch.no_grad():
_a = model(snake_case_ )[0]
_a = 50_265
_a = torch.Size((1, 4_096, vocab_size) )
self.assertEqual(output.shape , snake_case_ )
_a = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case_ , atol=1E-4 ) )
| 388 |
def nand_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int((input_1, input_2).count(0 ) != 0 )


def test_nand_gate() -> None:
    """simple docstring"""
    assert nand_gate(0 , 0 ) == 1
    assert nand_gate(0 , 1 ) == 1
    assert nand_gate(1 , 0 ) == 1
    assert nand_gate(1 , 1 ) == 0
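# Equivalent one-liner, since NAND is simply the negation of AND:
def nand_gate_alt(input_1: int, input_2: int) -> int:
    return int(not (input_1 and input_2))


assert all(nand_gate_alt(a, b) == nand_gate(a, b) for a in (0, 1) for b in (0, 1))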
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 27 | 0 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
_UpperCamelCase = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
_UpperCamelCase = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs(word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class BlenderbotTokenizer(PreTrainedTokenizer ):
_UpperCamelCase = VOCAB_FILES_NAMES
_UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self , A_ , A_ , A_="replace" , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=False , **A_ , ) ->Dict:
'''simple docstring'''
__lowerCAmelCase : str = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else bos_token
__lowerCAmelCase : Optional[int] = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else eos_token
__lowerCAmelCase : Optional[int] = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else sep_token
__lowerCAmelCase : Any = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else cls_token
__lowerCAmelCase : Union[str, Any] = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else unk_token
__lowerCAmelCase : Tuple = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__lowerCAmelCase : Union[str, Any] = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
super().__init__(
errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , **snake_case_ , )
with open(snake_case_ , encoding='''utf-8''' ) as vocab_handle:
__lowerCAmelCase : List[Any] = json.load(snake_case_ )
__lowerCAmelCase : Union[str, Any] = {v: k for k, v in self.encoder.items()}
__lowerCAmelCase : int = errors # how to handle errors in decoding
__lowerCAmelCase : Optional[int] = bytes_to_unicode()
__lowerCAmelCase : Any = {v: k for k, v in self.byte_encoder.items()}
with open(snake_case_ , encoding='''utf-8''' ) as merges_handle:
__lowerCAmelCase : Union[str, Any] = merges_handle.read().split('''\n''' )[1:-1]
__lowerCAmelCase : Tuple = [tuple(merge.split() ) for merge in bpe_merges]
__lowerCAmelCase : int = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
__lowerCAmelCase : str = {}
__lowerCAmelCase : List[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__lowerCAmelCase : List[Any] = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
return len(self.encoder )
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase__ ( self , A_ ) ->Optional[int]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__lowerCAmelCase : Optional[int] = tuple(snake_case_ )
__lowerCAmelCase : List[Any] = get_pairs(snake_case_ )
if not pairs:
return token
while True:
__lowerCAmelCase : Dict = min(snake_case_ , key=lambda A_ : self.bpe_ranks.get(snake_case_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__lowerCAmelCase, __lowerCAmelCase : Optional[int] = bigram
__lowerCAmelCase : List[Any] = []
__lowerCAmelCase : str = 0
while i < len(snake_case_ ):
try:
__lowerCAmelCase : Tuple = word.index(snake_case_ , snake_case_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowerCAmelCase : Tuple = j
if word[i] == first and i < len(snake_case_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowerCAmelCase : Optional[int] = tuple(snake_case_ )
__lowerCAmelCase : Optional[Any] = new_word
if len(snake_case_ ) == 1:
break
else:
__lowerCAmelCase : int = get_pairs(snake_case_ )
__lowerCAmelCase : Tuple = ''' '''.join(snake_case_ )
__lowerCAmelCase : Union[str, Any] = word
return word
def UpperCamelCase__ ( self , A_ ) ->Tuple:
'''simple docstring'''
__lowerCAmelCase : Any = []
for token in re.findall(self.pat , snake_case_ ):
__lowerCAmelCase : List[Any] = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(snake_case_ ).split(''' ''' ) )
return bpe_tokens
def UpperCamelCase__ ( self , A_ ) ->List[Any]:
'''simple docstring'''
return self.encoder.get(snake_case_ , self.encoder.get(self.unk_token ) )
def UpperCamelCase__ ( self , A_ ) ->Optional[int]:
'''simple docstring'''
return self.decoder.get(snake_case_ )
def UpperCamelCase__ ( self , A_ ) ->str:
'''simple docstring'''
__lowerCAmelCase : Tuple = ''''''.join(snake_case_ )
__lowerCAmelCase : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def UpperCamelCase__ ( self , A_ , A_ = None ) ->Dict:
'''simple docstring'''
if not os.path.isdir(snake_case_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCAmelCase : Optional[Any] = os.path.join(
snake_case_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCAmelCase : Dict = os.path.join(
snake_case_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case_ , ensure_ascii=snake_case_ ) + '''\n''' )
__lowerCAmelCase : int = 0
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A_ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
__lowerCAmelCase : Union[str, Any] = token_index
writer.write(''' '''.join(snake_case_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def UpperCamelCase__ ( self , A_ , A_ = None , A_ = False ) ->int:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case_ )) + [1]
return [1] + ([0] * len(snake_case_ )) + [1, 1] + ([0] * len(snake_case_ )) + [1]
def UpperCamelCase__ ( self , A_ , A_ = None ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = [self.sep_token_id]
__lowerCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase__ ( self , A_ , A_=False , **A_ ) ->Tuple:
'''simple docstring'''
__lowerCAmelCase : str = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(snake_case_ ) > 0 and not text[0].isspace()):
__lowerCAmelCase : Optional[int] = ''' ''' + text
return (text, kwargs)
def UpperCamelCase__ ( self , A_ , A_ = None ) ->List[Any]:
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation):
        """Flattens a `Conversation` into a single sequence of input ids."""
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""")
        return input_ids
| 492 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True

        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
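# To run this file locally (my addition; the path follows the usual layout of
# the transformers repo and may differ in your checkout):
#
#   python -m pytest tests/models/esm/test_modeling_tf_esm.py -q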
| 27 | 0 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Heuristic for the A* algorithm: Manhattan or Euclidean distance to the goal."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []

        self.reached = False

    def search(self):
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent):
        """Returns a list of valid successors (inside the grid and on free cells)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node):
        """Retrace the path from parent to parent until reaching the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start, goal):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self):
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node)
                        )

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node, bwd_node):
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()  # this call was missing in the snippet; without it nothing is searched
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 336 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
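# A minimal wiring sketch (my addition; `module` stands for whichever
# LightningModule the surrounding training script builds):
#
#   trainer = pl.Trainer(
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback(output_dir="output", metric="bleu"),
#           get_early_stopping_callback(metric="bleu", patience=3),
#       ],
#   )
#   trainer.fit(module)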
| 27 | 0 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data) -> tuple:
    """Split a scikit-learn dataset bunch into (features, target)."""
    return (data["data"], data["target"])


def xgboost(features, target, test_features) -> np.ndarray:
    """Fit an XGBoost regressor and predict on the test features."""
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
| 387 |
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()
def floyd_warshall(graph, v):
    """Compute all-pairs shortest distances for a weighted graph with v vertices."""
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
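# A non-interactive sketch of the same run (my addition; the matrix mirrors
# the example transcript above):
#
#     INF = float("inf")
#     demo = [[0.0, INF, INF], [INF, 0.0, 2.0], [INF, 1.0, 0.0]]
#     dist, _ = floyd_warshall(demo, 3)  # prints the expected matrix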
| 27 | 0 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
| 602 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
__A : Optional[int] = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
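# Example invocations (my addition; the flags mirror the parser defined above):
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "echo hello" --install_accelerate --debug
#
#   accelerate tpu-config --config_file my_config.yaml --command_file setup.sh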
| 27 | 0 |
"""simple docstring"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    def is_valid_tree(node: TreeNode | None) -> bool:
        """Check that every node is a TreeNode carrying float-convertible data."""
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float."
        )

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        """Check that the BST ordering invariant holds within (left_bound, right_bound)."""
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound
            )
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))
if __name__ == "__main__":
import doctest
doctest.testmod()
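
    # A short sketch (my addition, not part of the original file): exercise the
    # checker on one valid and one invalid tree.
    valid_tree = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
    broken_tree = TreeNode(2.0, TreeNode(5.0), TreeNode(3.0))
    print(is_binary_search_tree(valid_tree))  # True
    print(is_binary_search_tree(broken_tree))  # False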
| 224 |
from ...configuration_utils import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
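# A minimal usage sketch (my addition): build a config with defaults and
# override one field, as with any PretrainedConfig subclass.
#
#   config = NezhaConfig(num_hidden_layers=6)
#   print(config.model_type, config.hidden_size)  # nezha 768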
| 27 | 0 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
| 25 |
from collections import defaultdict
from math import ceil, sqrt
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE = 1_000_000 , _SCREAMING_SNAKE_CASE = 10 ) -> int:
"""simple docstring"""
_A = defaultdict(_SCREAMING_SNAKE_CASE )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
_A = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
_A = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(_SCREAMING_SNAKE_CASE , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f"{solution() = }")
| 27 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"squeezebert/squeezebert-uncased": 5_1_2,
"squeezebert/squeezebert-mnli": 5_1_2,
"squeezebert/squeezebert-mnli-headless": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
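# A short usage sketch (my addition; the checkpoint name comes from the maps
# defined at the top of this file):
#
#   tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   print(tokenizer("hello world").input_ids)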
| 405 |
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    """Calculate the Surface Area of a Cube."""
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """Calculate the Surface Area of a Cuboid."""
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    """Calculate the Surface Area of a Sphere."""
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    """Calculate the Surface Area of a Hemisphere (curved plus flat surface)."""
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    """Calculate the Surface Area of a Cone."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """Calculate the Surface Area of a Conical Frustum."""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    """Calculate the Surface Area of a Cylinder."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """Calculate the Surface Area of a Torus."""
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
    """Calculate the area of a rectangle."""
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    """Calculate the area of a square."""
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    """Calculate the area of a triangle given the base and height."""
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    """Calculate the area of a triangle from its three sides (Heron's formula)."""
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3)
    )
    return area
def area_parallelogram(base: float, height: float) -> float:
    """Calculate the area of a parallelogram."""
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    """Calculate the area of a trapezium."""
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    """Calculate the area of a circle."""
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    """Calculate the area of an ellipse."""
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """Calculate the area of a rhombus."""
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    """Calculate the area of a regular polygon with `sides` sides of the given length."""
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError("area_reg_polygon() only accepts non-negative values as length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 27 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 |
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
"""simple docstring"""
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 27 | 0 |
from math import ceil, sqrt
def solution(limit: int = 1_000_000 ) -> int:
    """simple docstring"""
    answer = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
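        # the hole must share the outer square's parity so the tiled border has uniform width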
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
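        # count admissible hole widths from the lower bound up to outer_width - 2, stepping by 2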
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f'''{solution() = }''')
| 202 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 27 | 0 |
'''simple docstring'''
from __future__ import annotations
def peak(lst: list[int] ) -> int:
    m = len(lst ) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
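    # the input is assumed unimodal, so discarding half of it each call finds the peak in O(log n)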
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 451 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__A : List[Any] = "http://www.mocksite.com/file1.txt"
__A : List[Any] = "\"text\": [\"foo\", \"foo\"]"
__A : Dict = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class MockResponse:
    '''simple docstring'''
    status_code = 200
    headers = {'Content-Length': '100'}
    cookies = {}
    def iter_content(self , **kwargs ):
        return [bytes(CONTENT , 'utf-8' )]
def mock_request(*args , **kwargs ):
    """simple docstring"""
    return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def test_download_manager(urls_type , tmp_path , monkeypatch ):
    """simple docstring"""
    import requests
    monkeypatch.setattr(requests , 'request' , mock_request )
    url = URL
    if issubclass(urls_type , str ):
        urls = url
    elif issubclass(urls_type , list ):
        urls = [url]
    elif issubclass(urls_type , dict ):
        urls = {'train': url}
    dataset_name = 'dummy'
    cache_subdir = 'downloads'
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root , cache_subdir ) , use_etag=False , )
    dl_manager = DownloadManager(dataset_name=dataset_name , download_config=download_config )
    downloaded_paths = dl_manager.download(urls )
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls , str ):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls , dict ):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths , input_urls ):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path )
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix('.json' )
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text() )
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def test_download_manager_extract(paths_type , xz_file , text_file ):
    """simple docstring"""
    filename = str(xz_file )
    if issubclass(paths_type , str ):
        paths = filename
    elif issubclass(paths_type , list ):
        paths = [filename]
    elif issubclass(paths_type , dict ):
        paths = {'train': filename}
    dataset_name = 'dummy'
    cache_dir = xz_file.parent
    extracted_subdir = 'extracted'
    download_config = DownloadConfig(
        cache_dir=cache_dir , use_etag=False , )
    dl_manager = DownloadManager(dataset_name=dataset_name , download_config=download_config )
    extracted_paths = dl_manager.extract(paths )
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths , str ):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths , dict ):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths , input_paths ):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path )
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path , etag=None )
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path , file ):
    """simple docstring"""
    assert path.endswith('.jsonl' )
    for num_items, line in enumerate(file , start=1 ):
        item = json.loads(line.decode('utf-8' ) )
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def test_iter_archive_path(archive_jsonl , request ):
    """simple docstring"""
    archive_jsonl_path = request.getfixturevalue(archive_jsonl )
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path ) , start=1 ):
        _test_jsonl(path , file )
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def test_iter_archive_file(archive_nested_jsonl , request ):
    """simple docstring"""
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl )
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path ) , start=1 ):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file ) , start=1 ):
            _test_jsonl(subpath , subfile )
assert num_tar == 1
assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files ):
    """simple docstring"""
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files ) , start=1 ):
        assert os.path.basename(file ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 27 | 0 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , mask_ratio=0.6 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1) ) )
    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ):
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def create_and_check_model(self , config , pixel_values , labels ):
        model = ViTMAEModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining(self , config , pixel_values , labels ):
        model = ViTMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        num_patches = (self.image_size // self.patch_size) ** 2
        # the decoder predicts every pixel of every patch: patch_size**2 * num_channels values per patch
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
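# ViTMAE resamples its random mask on every forward pass, which is why several
# determinism-style common tests are skipped in the test class below.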
@require_torch
class ViTMAEModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self ):
        self.model_tester = ViTMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=37 )
    def test_config(self ):
        self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
    def test_inputs_embeds(self ):
pass
    def test_model_common_attributes(self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature(self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def check_pt_tf_models(self , tf_model , pt_model , pt_inputs_dict ):
        # make masks reproducible
        np.random.seed(2 )
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        pt_noise = torch.from_numpy(noise )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise
        super().check_pt_tf_models(tf_model , pt_model , pt_inputs_dict )
    def test_save_load(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2 )
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2 )] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model = model_class.from_pretrained(tmpdirname )
                model.to(torch_device )
                # make random mask reproducible
                torch.manual_seed(2 )
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1 )] = 0
                max_diff = np.amax(np.abs(out_1 - out_2 ) )
                self.assertLessEqual(max_diff , 1E-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
    def test_determinism(self ):
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
    def test_save_load_fast_init_from_base(self ):
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
    def test_save_load_fast_init_to_base(self ):
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def test_model_is_small(self ):
pass
@slow
    def test_model_from_pretrained(self ):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor(self ):
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
    def test_inference_for_pretraining(self ):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2 )
        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        noise = np.random.uniform(size=(1, num_patches) )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs , noise=torch.from_numpy(noise ).to(device=torch_device ) )
        # verify the logits: 196 patches (14 x 14) with 16 * 16 * 3 = 768 predicted pixel values each
        expected_shape = torch.Size((1, 196, 768) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(torch_device ) , atol=1E-4 ) )
| 388 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int ) -> bool:
    """simple docstring"""
    sq = int(number**0.5 )
    return number == sq * sq
def add_three(x_num: int , x_den: int , y_num: int , y_den: int , z_num: int , z_den: int ) -> tuple[int, int]:
    """simple docstring"""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top , bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 35 ) -> int:
    """simple docstring"""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0 )
    fraction_sum: tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
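                    # build candidate fractions z from x = x_num/x_den and y = y_num/y_den for exponents n in {1, 2, -1, -2}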
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
for num, den in unique_s:
        total += Fraction(num , den )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f"{solution() = }")
| 27 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor ):
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize=True , size=None , resample=PILImageResampling.BICUBIC , do_center_crop=True , crop_size=None , do_rescale=True , rescale_factor=1 / 255 , do_normalize=True , image_mean=None , image_std=None , do_convert_rgb=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {'''shortest_edge''': 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name='''crop_size''' )
        # `size` governs the shortest-edge resize; `crop_size` fixes the final square after center-cropping
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image , size , resample=PILImageResampling.BICUBIC , data_format=None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(image , size=size['''shortest_edge'''] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format=None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format=None , **kwargs , ):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format=None , **kwargs , ):
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize=None , size=None , resample=None , do_center_crop=None , crop_size=None , do_rescale=None , rescale_factor=None , do_normalize=None , image_mean=None , image_std=None , do_convert_rgb=None , return_tensors=None , data_format=ChannelDimension.FIRST , **kwargs , ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name='''size''' , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 492 |
from __future__ import annotations
import math
def prime_sieve(num: int ) -> list[int]:
    """simple docstring"""
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message )
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num ) )
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start )
            # Set multiples of start be False
            for i in range(start * start , num + 1 , start ):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    # everything above sqrt(num) that is still marked True is prime
    for j in range(end + 1 , num + 1 ):
        if sieve[j] is True:
            prime.append(j )
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| 27 | 0 |
def binary_count_setbits(a: int ) -> int:
    if a < 0:
        raise ValueError("""Input value must be a positive integer""" )
    elif isinstance(a , float ):
        raise TypeError("""Input value must be a \'int\' type""" )
    return bin(a ).count("""1""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 |
__A : Dict = "Alexander Joslin"
import operator as op
from .stack import Stack
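# Dijkstra's two-stack algorithm: digits go on an operand stack, operators on a second
# stack; every ")" pops one operator and two operands and pushes the result back.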
def dijkstras_two_stack_algorithm(equation: str ) -> int:
    """simple docstring"""
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num_a = operand_stack.peek()
            operand_stack.pop()
            num_b = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num_b , num_a )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
__A : Any = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 27 | 0 |
def nand_gate(input_a: int, input_b: int ) -> int:
    """simple docstring"""
    # NAND outputs 0 only when both inputs are 1
    return int((input_a, input_b).count(0 ) != 0 )
def test_nand_gate() -> None:
"""simple docstring"""
assert nand_gate(0, 0 ) == 1
assert nand_gate(0, 1 ) == 1
assert nand_gate(1, 0 ) == 1
assert nand_gate(1, 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 387 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCamelCase( unittest.TestCase ):
'''simple docstring'''
@property
    def dummy_uncond_unet(self ):
        torch.manual_seed(0 )
        model = UNet2DModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model
    def test_inference(self ):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=2 , generator=generator , output_type='numpy' ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = pipe(num_inference_steps=2 , generator=generator , output_type='numpy' , return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class lowerCamelCase( unittest.TestCase ):
'''simple docstring'''
    def test_karras_ve_pipeline(self ):
        model_id = 'google/ncsnpp-celebahq-256'
        model = UNet2DModel.from_pretrained(model_id )
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model , scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=20 , generator=generator , output_type='numpy' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 27 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
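# Lazy import structure: model classes are registered by name below and only imported on first attribute access.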
_import_structure = {
"configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = ["MobileViTFeatureExtractor"]
__A : List[Any] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
"MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileViTForImageClassification",
"MobileViTForSemanticSegmentation",
"MobileViTModel",
"MobileViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
"TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileViTForImageClassification",
"TFMobileViTForSemanticSegmentation",
"TFMobileViTModel",
"TFMobileViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 602 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
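# module-level RNG shared by floats_list whenever no explicit generator is passed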
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """simple docstring"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class TvltFeatureExtractionTester(unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , spectrogram_length=2048 , feature_size=128 , num_audio_channels=1 , hop_length=512 , chunk_length=30 , sampling_rate=4_4100 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # input lengths step linearly from min to max across the batch
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common(self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    '''simple docstring'''
    feature_extraction_class = TvltFeatureExtractor
    def setUp(self ):
        self.feat_extract_tester = TvltFeatureExtractionTester(self )
    def test_feat_extract_properties(self ):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(feature_extractor , 'spectrogram_length' ) )
        self.assertTrue(hasattr(feature_extractor , 'feature_size' ) )
        self.assertTrue(hasattr(feature_extractor , 'num_audio_channels' ) )
        self.assertTrue(hasattr(feature_extractor , 'hop_length' ) )
        self.assertTrue(hasattr(feature_extractor , 'chunk_length' ) )
        self.assertTrue(hasattr(feature_extractor , 'sampling_rate' ) )
    def test_feat_extract_from_and_save_pretrained(self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop('mel_filters' )
        mel_2 = dict_second.pop('mel_filters' )
        self.assertTrue(np.allclose(mel_1 , mel_2 ) )
        self.assertEqual(dict_first , dict_second )
    def test_feat_extract_to_json_file(self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , 'feat_extract.json' )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop('mel_filters' )
        mel_2 = dict_second.pop('mel_filters' )
        self.assertTrue(np.allclose(mel_1 , mel_2 ) )
        self.assertEqual(dict_first , dict_second )
    def test_call(self ):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0] , return_tensors='np' , sampling_rate=4_4100 ).audio_values
        # expected layout: (batch, audio channels, time frames capped at spectrogram_length, feature_size)
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors='np' , sampling_rate=4_4100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs , return_tensors='np' , sampling_rate=4_4100 , mask_audio=True ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors='np' , sampling_rate=4_4100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples(self , num_samples ):
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('id' ).select(range(num_samples ) )[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
    def test_integration(self ):
        input_speech = self._load_datasamples(1 )
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech , return_tensors='pt' ).audio_values
        self.assertEquals(audio_values.shape , (1, 1, 192, 128) )
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , expected_slice , atol=1E-4 ) )
| 27 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
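# Kandinsky 2.2 decoder pipeline: turns CLIP image embeddings into pixels with a conditioned UNet and a MoVQ (VQ-VAE) decoder.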
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase__ = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n"
def downscale_height_and_width(height , width , scale_factor=8 ):
    # map the requested pixel size to the latent grid: ceil(size / scale_factor**2) * scale_factor
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22Pipeline(DiffusionPipeline ):
    def __init__( self , unet: UNet2DConditionModel , scheduler: DDPMScheduler , movq: VQModel , ):
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        device = torch.device(f'''cuda:{gpu_id}''' )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
        device = torch.device(f'''cuda:{gpu_id}''' )
        if self.device.type != "cpu":
            self.to('''cpu''' , silence_dtype_warnings=True )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
        if not hasattr(self.unet , '''_hf_hook''' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , '''_hf_hook''' )
                and hasattr(module._hf_hook , '''execution_device''' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image_embeds , negative_image_embeds , height = 512 , width = 512 , num_inference_steps = 100 , guidance_scale = 4.0 , num_images_per_prompt = 1 , generator = None , latents = None , output_type = "pil" , return_dict = True , ):
        device = self._execution_device
        # classifier-free guidance doubles the batch: negative + positive image embeddings
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height , width , self.movq_scale_factor )
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        for i, t in enumerate(self.progress_bar(timesteps ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'''image_embeds''': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                # split the doubled batch back into unconditional / conditioned halves
                noise_pred, variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
                _, variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , '''variance_type''' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )['''sample''']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 224 |
def check_bouncy(n: int ) -> bool:
    """simple docstring"""
    if not isinstance(n , int ):
        raise ValueError('check_bouncy() accepts only integer arguments' )
    str_n = str(n )
    sorted_str_n = ''.join(sorted(str_n ) )
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def solution(percent: float = 99 ) -> int:
    """simple docstring"""
    if not 0 < percent < 100:
        raise ValueError('solution() only accepts values from 0 to 100' )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"{solution(99)}")
| 27 | 0 |
def binary_recursive(decimal: int ) -> str:
    decimal = int(decimal )
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal )
    div, mod = divmod(decimal , 2 )
    # append the remainder (least-significant bit) after recursing on the quotient
    return binary_recursive(div ) + str(mod )
def main(number: str ) -> str:
    number = str(number ).strip()
    if not number:
        raise ValueError("No input value was provided" )
    negative = "-" if number.startswith("-" ) else ""
    number = number.lstrip("-" )
    if not number.isnumeric():
        raise ValueError("Input value is not an integer" )
    return f"{negative}0b{binary_recursive(int(number ))}"
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 25 |
def price_plus_tax(price: float , tax_rate: float ) -> float:
    """simple docstring"""
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"{price_plus_tax(100, 0.2_5) = }")
print(f"{price_plus_tax(1_2_5.5_0, 0.0_5) = }")
| 27 | 0 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
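# the fast tests below run on the CPU execution provider, so no GPU build of onnxruntime is required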
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin , unittest.TestCase ):
    hub_checkpoint = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
    def get_dummy_inputs( self , seed=0 ):
        '''simple docstring'''
        generator = np.random.RandomState(seed )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
    def test_pipeline_default_ddim(self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.6_50_72, 0.5_84_92, 0.4_82_19, 0.5_55_21, 0.5_31_80, 0.5_59_39, 0.5_06_97, 0.3_98_00, 0.4_64_55] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_pipeline_pndm(self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.6_58_63, 0.5_94_25, 0.4_93_26, 0.5_63_13, 0.5_38_75, 0.5_66_27, 0.5_10_65, 0.3_97_77, 0.4_63_30] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_pipeline_lms(self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.5_37_55, 0.6_07_86, 0.4_74_02, 0.4_94_88, 0.5_18_69, 0.4_98_19, 0.4_79_85, 0.3_89_57, 0.4_42_79] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : str ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_SCREAMING_SNAKE_CASE =EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case_ )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs()
_SCREAMING_SNAKE_CASE =pipe(**snake_case_ ).images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_SCREAMING_SNAKE_CASE =np.array([0.5_37_55, 0.6_07_86, 0.4_74_02, 0.4_94_88, 0.5_18_69, 0.4_98_19, 0.4_79_85, 0.3_89_57, 0.4_42_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : int ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_SCREAMING_SNAKE_CASE =EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case_ )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs()
_SCREAMING_SNAKE_CASE =pipe(**snake_case_ ).images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_SCREAMING_SNAKE_CASE =np.array([0.5_38_17, 0.6_08_12, 0.4_73_84, 0.4_95_30, 0.5_18_94, 0.4_98_14, 0.4_79_84, 0.3_89_58, 0.4_42_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_SCREAMING_SNAKE_CASE =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case_ )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs()
_SCREAMING_SNAKE_CASE =pipe(**snake_case_ ).images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_SCREAMING_SNAKE_CASE =np.array([0.5_38_95, 0.6_08_08, 0.4_79_33, 0.4_96_08, 0.5_18_86, 0.4_99_50, 0.4_80_53, 0.3_89_57, 0.4_42_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : Optional[Any] ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=snake_case_ )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs()
_SCREAMING_SNAKE_CASE =3 * [inputs['prompt']]
# forward
_SCREAMING_SNAKE_CASE =pipe(**snake_case_ )
_SCREAMING_SNAKE_CASE =output.images[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs()
_SCREAMING_SNAKE_CASE =3 * [inputs.pop('prompt' )]
_SCREAMING_SNAKE_CASE =pipe.tokenizer(
snake_case_ , padding='max_length' , max_length=pipe.tokenizer.model_max_length , truncation=snake_case_ , return_tensors='np' , )
_SCREAMING_SNAKE_CASE =text_inputs['input_ids']
_SCREAMING_SNAKE_CASE =pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
_SCREAMING_SNAKE_CASE =prompt_embeds
# forward
_SCREAMING_SNAKE_CASE =pipe(**snake_case_ )
_SCREAMING_SNAKE_CASE =output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
def A ( self : List[Any] ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=snake_case_ )
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs()
_SCREAMING_SNAKE_CASE =3 * ['this is a negative prompt']
_SCREAMING_SNAKE_CASE =negative_prompt
_SCREAMING_SNAKE_CASE =3 * [inputs['prompt']]
# forward
_SCREAMING_SNAKE_CASE =pipe(**snake_case_ )
_SCREAMING_SNAKE_CASE =output.images[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =self.get_dummy_inputs()
_SCREAMING_SNAKE_CASE =3 * [inputs.pop('prompt' )]
_SCREAMING_SNAKE_CASE =[]
for p in [prompt, negative_prompt]:
_SCREAMING_SNAKE_CASE =pipe.tokenizer(
snake_case_ , padding='max_length' , max_length=pipe.tokenizer.model_max_length , truncation=snake_case_ , return_tensors='np' , )
_SCREAMING_SNAKE_CASE =text_inputs['input_ids']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =embeds
# forward
_SCREAMING_SNAKE_CASE =pipe(**snake_case_ )
_SCREAMING_SNAKE_CASE =output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class A__ ( unittest.TestCase ):
@property
def A ( self : Tuple ) -> Tuple:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def A ( self : List[Any] ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =ort.SessionOptions()
_SCREAMING_SNAKE_CASE =False
return options
def A ( self : List[str] ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_SCREAMING_SNAKE_CASE ='A painting of a squirrel eating a burger'
np.random.seed(0 )
_SCREAMING_SNAKE_CASE =sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type='np' )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_SCREAMING_SNAKE_CASE =np.array([0.04_52, 0.03_90, 0.00_87, 0.03_50, 0.06_17, 0.03_64, 0.05_44, 0.05_23, 0.07_20] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A ( self : Tuple ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =DDIMScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=snake_case_ , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_SCREAMING_SNAKE_CASE ='open neural network exchange'
_SCREAMING_SNAKE_CASE =np.random.RandomState(0 )
_SCREAMING_SNAKE_CASE =sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case_ , output_type='np' )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_SCREAMING_SNAKE_CASE =np.array([0.28_67, 0.19_74, 0.14_81, 0.72_94, 0.72_51, 0.66_67, 0.41_94, 0.56_42, 0.64_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A ( self : int ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=snake_case_ , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_SCREAMING_SNAKE_CASE ='open neural network exchange'
_SCREAMING_SNAKE_CASE =np.random.RandomState(0 )
_SCREAMING_SNAKE_CASE =sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case_ , output_type='np' )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_SCREAMING_SNAKE_CASE =np.array([0.23_06, 0.19_59, 0.15_93, 0.65_49, 0.63_94, 0.54_08, 0.50_65, 0.60_10, 0.61_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A ( self : Any ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =0
def test_callback_fn(_a : List[str] , _a : Any , _a : Tuple ) -> None:
_SCREAMING_SNAKE_CASE =True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
_SCREAMING_SNAKE_CASE =latents[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =np.array(
[-0.67_72, -0.38_35, -1.24_56, 0.19_05, -1.09_74, 0.69_67, -1.93_53, 0.01_78, 1.01_67] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
_SCREAMING_SNAKE_CASE =latents[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =np.array(
[-0.33_51, 0.22_41, -0.18_37, -0.23_25, -0.65_77, 0.33_93, -0.02_41, 0.58_99, 1.38_75] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
_SCREAMING_SNAKE_CASE =False
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case_ )
_SCREAMING_SNAKE_CASE ='Andromeda galaxy in a bottle'
_SCREAMING_SNAKE_CASE =np.random.RandomState(0 )
pipe(
prompt=snake_case_ , num_inference_steps=5 , guidance_scale=7.5 , generator=snake_case_ , callback=snake_case_ , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def A ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(snake_case_ , snake_case_ )
assert pipe.safety_checker is None
_SCREAMING_SNAKE_CASE =pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(snake_case_ )
_SCREAMING_SNAKE_CASE =OnnxStableDiffusionPipeline.from_pretrained(snake_case_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_SCREAMING_SNAKE_CASE =pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
| 405 |
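# Outside the test harness, the pipeline exercised above is typically driven
# like this (a minimal sketch; it assumes the "onnx" revision of the checkpoint
# is available and that onnxruntime is installed).
import numpy as np

from diffusers import OnnxStableDiffusionPipeline

pipe = OnnxStableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    revision="onnx",
    provider="CPUExecutionProvider",  # use CUDAExecutionProvider with onnxruntime-gpu
)
generator = np.random.RandomState(0)  # ONNX pipelines take NumPy RNGs, not torch.Generator
image = pipe(
    "A painting of a squirrel eating a burger",
    num_inference_steps=10,
    guidance_scale=7.5,
    generator=generator,
).images[0]
image.save("squirrel.png")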
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """simple docstring"""
    start = a
    end = b
    if function(a) == 0:  # one of the bounds a or b is already a root of the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if neither bound is a root and f(a), f(b) share the same sign,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until the bracket is narrower than 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    """simple docstring"""
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
| 27 | 0 |
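# A quick usage check of the bisection routine defined above on a second
# bracketed root (my own example): cos has its root at pi/2 inside [1, 2].
import math

root = bisection(math.cos, 1, 2)
assert abs(root - math.pi / 2) < 1e-6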
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset("glue" , "mrpc" )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=False )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="max_length" , max_length=128 , return_tensors="pt" )
        return tokenizer.pad(examples , padding="longest" , return_tensors="pt" )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    '''simple docstring'''
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader ):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            outputs = model(**batch )
        predictions = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"]) )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader ) - 1:
                predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                references = references[: len(eval_dataloader.dataset ) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions , references=references , )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
'''simple docstring'''
_lowerCAmelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCAmelCase = config["lr"]
_lowerCAmelCase = int(config["num_epochs"] )
_lowerCAmelCase = int(config["seed"] )
_lowerCAmelCase = int(config["batch_size"] )
_lowerCAmelCase = args.model_name_or_path
set_seed(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase , _lowerCAmelCase = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
_lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE )
# Instantiate optimizer
_lowerCAmelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_lowerCAmelCase = optimizer_cls(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE )
if accelerator.state.deepspeed_plugin is not None:
_lowerCAmelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
_lowerCAmelCase = 1
_lowerCAmelCase = (len(_SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_lowerCAmelCase = get_linear_schedule_with_warmup(
optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=0 , num_training_steps=_SCREAMING_SNAKE_CASE , )
else:
_lowerCAmelCase = DummyScheduler(_SCREAMING_SNAKE_CASE , total_num_steps=_SCREAMING_SNAKE_CASE , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# We need to keep track of how many total steps we have iterated over
_lowerCAmelCase = 0
    # We also need to keep track of the starting epoch so files are named properly
_lowerCAmelCase = 0
_lowerCAmelCase = evaluate.load("glue" , "mrpc" )
_lowerCAmelCase = num_epochs
if args.partial_train_epoch is not None:
_lowerCAmelCase = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
_lowerCAmelCase = args.resume_from_checkpoint.split("epoch_" )[1]
_lowerCAmelCase = ""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
_lowerCAmelCase = int(_SCREAMING_SNAKE_CASE ) + 1
_lowerCAmelCase = evaluation_loop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
accelerator.print("resumed checkpoint performance:" , _SCREAMING_SNAKE_CASE )
accelerator.print("resumed checkpoint\'s scheduler\'s lr:" , lr_scheduler.get_lr()[0] )
accelerator.print("resumed optimizers\'s lr:" , optimizer.param_groups[0]["lr"] )
with open(os.path.join(args.output_dir , F'''state_{starting_epoch-1}.json''' ) , "r" ) as f:
_lowerCAmelCase = json.load(_SCREAMING_SNAKE_CASE )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
_lowerCAmelCase = {}
for epoch in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = model(**_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = outputs.loss
_lowerCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(_SCREAMING_SNAKE_CASE )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
_lowerCAmelCase = F'''epoch_{epoch}'''
_lowerCAmelCase = os.path.join(args.output_dir , _SCREAMING_SNAKE_CASE )
accelerator.save_state(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = evaluation_loop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase = accuracy
_lowerCAmelCase = lr_scheduler.get_lr()[0]
_lowerCAmelCase = optimizer.param_groups[0]["lr"]
_lowerCAmelCase = epoch
_lowerCAmelCase = overall_step
accelerator.print(F'''epoch {epoch}:''' , _SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'''state_{epoch}.json''' ) , "w" ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
    parser.add_argument(
        "--model_name_or_path" , type=str , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=False , )
    parser.add_argument(
        "--output_dir" , type=str , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
    parser.add_argument(
        "--resume_from_checkpoint" , type=str , default=None , help="If the training should continue from a checkpoint folder." , )
    parser.add_argument(
        "--partial_train_epoch" , type=int , default=None , help="If passed, the training will stop after this number of epochs." , )
    parser.add_argument(
        "--num_epochs" , type=int , default=2 , help="Number of train epochs." , )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 18 |
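# The checkpoint round-trip exercised above reduces to two Accelerator calls;
# a minimal sketch (the toy model, optimizer, and checkpoint path are
# stand-ins of mine, not from the script).
import torch

from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
model, optimizer = accelerator.prepare(model, optimizer)

accelerator.save_state("ckpt/epoch_0")  # writes model, optimizer, and RNG states
# ... train further, then resume exactly where training left off:
accelerator.load_state("ckpt/epoch_0")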
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase:
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ):
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = scope
def lowerCAmelCase__ ( self ):
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = random_attention_mask([self.batch_size, self.seq_length] )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self ):
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = NystromformerModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_A = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )
_A = model(snake_case_ , token_type_ids=snake_case_ )
_A = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = NystromformerForMaskedLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_A = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = NystromformerForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_A = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = self.num_labels
_A = NystromformerForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_A = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = self.num_labels
_A = NystromformerForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_A = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = self.num_choices
_A = NystromformerForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase__ ( self ):
_A = self.prepare_config_and_inputs()
(
(
_A
), (
_A
), (
_A
), (
_A
), (
_A
), (
_A
), (
_A
),
) = config_and_inputs
_A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
__magic_name__ = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
__magic_name__ = (
{
'feature-extraction': NystromformerModel,
'fill-mask': NystromformerForMaskedLM,
'question-answering': NystromformerForQuestionAnswering,
'text-classification': NystromformerForSequenceClassification,
'token-classification': NystromformerForTokenClassification,
'zero-shot': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
def lowerCAmelCase__ ( self ):
_A = NystromformerModelTester(self )
_A = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def lowerCAmelCase__ ( self ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_A = type
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
@slow
def lowerCAmelCase__ ( self ):
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = NystromformerModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_torch
class lowerCamelCase( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase__ ( self ):
_A = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' )
_A = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
_A = model(snake_case_ )[0]
_A = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , snake_case_ )
_A = torch.tensor(
[[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case_ , atol=1E-4 ) )
@slow
def lowerCAmelCase__ ( self ):
_A = 'the [MASK] of Belgium is Brussels'
_A = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' )
_A = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' )
_A = tokenizer(snake_case_ , return_tensors='pt' )
with torch.no_grad():
_A = model(encoding.input_ids ).logits
_A = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(snake_case_ ) , 'capital' )
| 27 | 0 |
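# The integration test above corresponds to this fill-mask usage (a sketch;
# the checkpoint name and the [MASK] position are taken from the test).
import torch

from transformers import AutoTokenizer, NystromformerForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

encoding = tokenizer("the [MASK] of Belgium is Brussels", return_tensors="pt")
with torch.no_grad():
    logits = model(encoding.input_ids).logits

predicted_id = logits[:, 2, :].argmax(-1)[0]  # position 2 holds the [MASK] token here
print(tokenizer.decode(predicted_id))  # expected: "capital"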
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowerCAmelCase : List[Any] = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """simple docstring"""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches , 1 )
        return TensorDataset(x , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )

    train_dataset = get_dataset(n_train_batches )
    valid_dataset = get_dataset(n_valid_batches )
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    valid_dataloader = DataLoader(valid_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """simple docstring"""
    rands = []
    for epoch in range(num_epochs ):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x )
            loss = torch.nn.functional.mse_loss(outputs , y )
            accelerator.backward(loss )
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random() )  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module ):
    def __init__( self ):
        """simple docstring"""
        super().__init__()
        self.a = nn.Parameter(torch.randn(1 ) )
        self.b = nn.Parameter(torch.randn(1 ) )

    def forward( self , x ):
        """simple docstring"""
        return x * self.a + self.b
class a ( unittest.TestCase ):
def snake_case_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__SCREAMING_SNAKE_CASE: Tuple = DummyModel()
__SCREAMING_SNAKE_CASE: Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: Any = dummy_dataloaders()
__SCREAMING_SNAKE_CASE: Optional[Any] = ProjectConfiguration(total_limit=1 , project_dir=snake_case_ , automatic_checkpoint_naming=snake_case_ )
# Train baseline
__SCREAMING_SNAKE_CASE: Any = Accelerator(project_config=snake_case_ )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: List[Any] = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def snake_case_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__SCREAMING_SNAKE_CASE: Union[str, Any] = DummyModel()
__SCREAMING_SNAKE_CASE: Tuple = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: Optional[int] = dummy_dataloaders()
# Train baseline
__SCREAMING_SNAKE_CASE: Tuple = Accelerator()
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: Any = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save initial
__SCREAMING_SNAKE_CASE: Union[str, Any] = os.path.join(snake_case_ , '''initial''' )
accelerator.save_state(snake_case_ )
((__SCREAMING_SNAKE_CASE) ,(__SCREAMING_SNAKE_CASE)): List[Any] = model.a.item(), model.b.item()
__SCREAMING_SNAKE_CASE: Union[str, Any] = optimizer.state_dict()
__SCREAMING_SNAKE_CASE: Any = train(3 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
((__SCREAMING_SNAKE_CASE) ,(__SCREAMING_SNAKE_CASE)): int = model.a.item(), model.b.item()
__SCREAMING_SNAKE_CASE: Union[str, Any] = optimizer.state_dict()
# Train partially
set_seed(42 )
__SCREAMING_SNAKE_CASE: str = DummyModel()
__SCREAMING_SNAKE_CASE: List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: Union[str, Any] = dummy_dataloaders()
__SCREAMING_SNAKE_CASE: Optional[Any] = Accelerator()
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: Tuple = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
accelerator.load_state(snake_case_ )
((__SCREAMING_SNAKE_CASE) ,(__SCREAMING_SNAKE_CASE)): Optional[Any] = model.a.item(), model.b.item()
__SCREAMING_SNAKE_CASE: int = optimizer.state_dict()
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
__SCREAMING_SNAKE_CASE: Dict = train(2 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save everything
__SCREAMING_SNAKE_CASE: List[Any] = os.path.join(snake_case_ , '''checkpoint''' )
accelerator.save_state(snake_case_ )
# Load everything back in and make sure all states work
accelerator.load_state(snake_case_ )
test_rands += train(1 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
((__SCREAMING_SNAKE_CASE) ,(__SCREAMING_SNAKE_CASE)): Optional[Any] = model.a.item(), model.b.item()
__SCREAMING_SNAKE_CASE: Union[str, Any] = optimizer.state_dict()
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
def snake_case_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__SCREAMING_SNAKE_CASE: str = DummyModel()
__SCREAMING_SNAKE_CASE: Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: Optional[Any] = dummy_dataloaders()
__SCREAMING_SNAKE_CASE: List[Any] = ProjectConfiguration(automatic_checkpoint_naming=snake_case_ )
# Train baseline
__SCREAMING_SNAKE_CASE: Optional[Any] = Accelerator(project_dir=snake_case_ , project_config=snake_case_ )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: Optional[int] = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save initial
accelerator.save_state()
((__SCREAMING_SNAKE_CASE) ,(__SCREAMING_SNAKE_CASE)): str = model.a.item(), model.b.item()
__SCREAMING_SNAKE_CASE: Any = optimizer.state_dict()
__SCREAMING_SNAKE_CASE: Tuple = train(3 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
((__SCREAMING_SNAKE_CASE) ,(__SCREAMING_SNAKE_CASE)): Any = model.a.item(), model.b.item()
__SCREAMING_SNAKE_CASE: int = optimizer.state_dict()
# Train partially
set_seed(42 )
__SCREAMING_SNAKE_CASE: int = DummyModel()
__SCREAMING_SNAKE_CASE: List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: int = dummy_dataloaders()
__SCREAMING_SNAKE_CASE: Optional[int] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=snake_case_ )
__SCREAMING_SNAKE_CASE: List[str] = Accelerator(project_dir=snake_case_ , project_config=snake_case_ )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: Optional[Any] = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
accelerator.load_state(os.path.join(snake_case_ , '''checkpoints''' , '''checkpoint_0''' ) )
((__SCREAMING_SNAKE_CASE) ,(__SCREAMING_SNAKE_CASE)): Optional[int] = model.a.item(), model.b.item()
__SCREAMING_SNAKE_CASE: List[Any] = optimizer.state_dict()
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
__SCREAMING_SNAKE_CASE: List[str] = train(2 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(snake_case_ , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
((__SCREAMING_SNAKE_CASE) ,(__SCREAMING_SNAKE_CASE)): Optional[Any] = model.a.item(), model.b.item()
__SCREAMING_SNAKE_CASE: Any = optimizer.state_dict()
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Optional[Any] = torch.tensor([1, 2, 3] )
__SCREAMING_SNAKE_CASE: str = torch.tensor([2, 3, 4] )
__SCREAMING_SNAKE_CASE: List[Any] = DummyModel()
__SCREAMING_SNAKE_CASE: Tuple = torch.optim.Adam(net.parameters() )
__SCREAMING_SNAKE_CASE: Any = Accelerator()
with self.assertRaises(snake_case_ ) as ve:
accelerator.register_for_checkpointing(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
__SCREAMING_SNAKE_CASE: List[str] = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def snake_case_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__SCREAMING_SNAKE_CASE: List[str] = DummyModel()
__SCREAMING_SNAKE_CASE: Any = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__SCREAMING_SNAKE_CASE: Tuple = torch.optim.lr_scheduler.StepLR(snake_case_ , step_size=1 , gamma=0.99 )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: Any = dummy_dataloaders()
__SCREAMING_SNAKE_CASE: List[str] = ProjectConfiguration(automatic_checkpoint_naming=snake_case_ )
# Train baseline
__SCREAMING_SNAKE_CASE: Optional[Any] = Accelerator(project_dir=snake_case_ , project_config=snake_case_ )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: str = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Save initial
accelerator.save_state()
__SCREAMING_SNAKE_CASE: List[Any] = scheduler.state_dict()
train(3 , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
self.assertNotEqual(snake_case_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(snake_case_ , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(snake_case_ , scheduler.state_dict() )
def snake_case_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__SCREAMING_SNAKE_CASE: Union[str, Any] = DummyModel()
__SCREAMING_SNAKE_CASE: List[str] = ProjectConfiguration(automatic_checkpoint_naming=snake_case_ , total_limit=2 )
# Train baseline
__SCREAMING_SNAKE_CASE: Optional[Any] = Accelerator(project_dir=snake_case_ , project_config=snake_case_ )
__SCREAMING_SNAKE_CASE: Tuple = accelerator.prepare(snake_case_ )
            # Save 11 states so checkpoints past total_limit get pruned:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(snake_case_ , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case_ , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case_ , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: List[Any] = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
if __name__ == "__main__":
lowerCAmelCase : Tuple = "/tmp/accelerate/state_checkpointing"
lowerCAmelCase : List[str] = DummyModel()
lowerCAmelCase : Union[str, Any] = torch.optim.Adam(params=model.parameters(), lr=1e-3)
lowerCAmelCase : Optional[Any] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
lowerCAmelCase : Dict = dummy_dataloaders()
lowerCAmelCase : Any = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
lowerCAmelCase : Optional[Any] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
lowerCAmelCase : int = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
lowerCAmelCase : Optional[int] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
        param_device = group["params"][0].device
break
assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
for group in optimizer.param_groups:
        param_device = group["params"][0].device
break
assert (
param_device.type == torch.device("""cpu""").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
for group in optimizer.param_groups:
        param_device = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 202 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : Dict = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[Any] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
__A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 27 | 0 |
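# The module above follows the library's lazy-import convention: submodules are
# imported only when one of their symbols is first accessed. A stripped-down
# illustration of the idea (this class is my sketch, not the real _LazyModule).
import importlib
import types


class LazySketchModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr):
        submodule = self._symbol_to_module.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(f".{submodule}", self.__name__), attr)
        setattr(self, attr, value)  # cache, so __getattr__ only runs once per symbol
        return value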
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__snake_case = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None) -> None:
    require_version(deps[pkg] , hint )
| 451 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__A : List[Any] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None) -> None:
    """simple docstring"""
    require_version(deps[pkg] , hint )
| 27 | 0 |
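# Both variants above delegate to require_version, which compares an installed
# distribution against a pip-style specifier; typical calls look like this
# (the requirement strings here are illustrative, not from the samples).
from transformers.utils.versions import require_version

require_version("numpy>=1.17")
# The optional second argument is a hint appended to the ImportError message.
require_version("tokenizers>=0.11.1,!=0.11.3", "pip install -U tokenizers")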
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __snake_case ( __snake_case ):
"""simple docstring"""
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 388 |
def nand_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int((input_1, input_2).count(0 ) != 0 )


def test_nand_gate() -> None:
    """simple docstring"""
    assert nand_gate(0 , 0 ) == 1
    assert nand_gate(0 , 1 ) == 1
    assert nand_gate(1 , 0 ) == 1
    assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 27 | 0 |
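# NAND is functionally complete, so the gate above composes into the other
# basic gates; a small sketch (the helper names are mine, nand_gate is the
# function defined above).
def not_gate(a: int) -> int:
    return nand_gate(a, a)


def and_gate(a: int, b: int) -> int:
    return not_gate(nand_gate(a, b))


def or_gate(a: int, b: int) -> int:
    return nand_gate(not_gate(a), not_gate(b))


assert [and_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [0, 0, 0, 1]
assert [or_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [0, 1, 1, 1]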
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowercase (__snake_case ):
_UpperCamelCase = ["""image_processor""", """tokenizer"""]
_UpperCamelCase = """Pix2StructImageProcessor"""
_UpperCamelCase = ("""T5Tokenizer""", """T5TokenizerFast""")
    def __init__( self , image_processor , tokenizer ) ->None:
        '''simple docstring'''
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
def __call__( self , A_=None , A_ = None , A_ = True , A_ = False , A_ = None , A_ = None , A_ = 2048 , A_ = 0 , A_ = None , A_ = None , A_ = False , A_ = False , A_ = False , A_ = False , A_ = False , A_ = True , A_ = None , **A_ , ) ->List[str]:
'''simple docstring'''
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None and not self.image_processor.is_vqa:
__lowerCAmelCase : Union[str, Any] = self.tokenizer
__lowerCAmelCase : Any = self.tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_token_type_ids=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
__lowerCAmelCase : List[str] = self.image_processor(
snake_case_ , return_tensors=snake_case_ , max_patches=snake_case_ , **snake_case_ )
else:
# add pixel_values and bbox
__lowerCAmelCase : Tuple = self.image_processor(
snake_case_ , return_tensors=snake_case_ , max_patches=snake_case_ , header_text=snake_case_ , **snake_case_ )
if text is not None and not self.image_processor.is_vqa:
__lowerCAmelCase : Dict = self.tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_token_type_ids=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
if "attention_mask" in text_encoding:
__lowerCAmelCase : Union[str, Any] = text_encoding.pop('''attention_mask''' )
if "input_ids" in text_encoding:
__lowerCAmelCase : Any = text_encoding.pop('''input_ids''' )
else:
__lowerCAmelCase : str = None
if text_encoding is not None:
encoding_image_processor.update(snake_case_ )
return encoding_image_processor
def UpperCamelCase__ ( self , *A_ , **A_ ) ->Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def UpperCamelCase__ ( self , *A_ , **A_ ) ->List[Any]:
'''simple docstring'''
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def UpperCamelCase__ ( self ) ->str:
'''simple docstring'''
__lowerCAmelCase : int = self.tokenizer.model_input_names
__lowerCAmelCase : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 492 |
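# End to end, the processor above is used roughly like this (a sketch;
# "google/pix2struct-textcaps-base" is an assumed public checkpoint, not one
# referenced by the sample).
import requests
from PIL import Image

from transformers import Pix2StructForConditionalGeneration, Pix2StructProcessor

processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-textcaps-base")

image = Image.open(requests.get("https://www.ilankelman.org/stopsigns/australia.jpg", stream=True).raw)
inputs = processor(images=image, return_tensors="pt")  # yields flattened patches + attention mask

generated_ids = model.generate(**inputs, max_new_tokens=50)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])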
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class lowerCamelCase:
'''simple docstring'''
def __init__( self , snake_case_ , ):
_A = parent
_A = 13
_A = 7
_A = True
_A = True
_A = True
_A = 99
_A = 32
_A = 2
_A = 4
_A = 37
_A = 'gelu'
_A = 0.1
_A = 0.1
_A = 512
_A = 16
_A = 2
_A = 0.02
_A = 3
_A = 4
_A = None
def lowerCAmelCase__ ( self ):
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = random_attention_mask([self.batch_size, self.seq_length] )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder( self ):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFEsmModel(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs )

        inputs = [input_ids, input_mask]
        result = model(inputs )

        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'encoder_hidden_states': encoder_hidden_states,
            'encoder_attention_mask': encoder_attention_mask,
        }
        result = model(inputs )

        inputs = [input_ids, input_mask]
        result = model(inputs , encoder_hidden_states=encoder_hidden_states )

        # Also check the case where encoder outputs are not passed
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFEsmForMaskedLM(config=config )
        result = model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFEsmModel,
            'fill-mask': TFEsmForMaskedLM,
            'text-classification': TFEsmForSequenceClassification,
            'token-classification': TFEsmForTokenClassification,
            'zero-shot': TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp( self ):
        self.model_tester = TFEsmModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_model_as_decoder( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    @unittest.skip('Protein models do not support embedding resizing.' )
    def test_resize_token_embeddings( self ):
        pass

    @unittest.skip('Protein models do not support embedding resizing.' )
    def test_save_load_after_resize_token_embeddings( self ):
        pass

    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name , dict )
                for k, v in name.items():
                    assert isinstance(v , tf.Variable )
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference_masked_lm( self ):
        model = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape ) , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.92_1518, -10.58_9814, -6.467_1307],
                    [-6.396_7156, -13.91_1377, -1.121_1915],
                    [-7.78_1247, -13.95_1557, -3.74_0592],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )

    @slow
    def test_inference_no_head( self ):
        model = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.1444_3092, 0.5412_5327, 0.324_7739],
                    [0.3034_0484, 0.0052_6676, 0.3107_7722],
                    [0.3227_8043, -0.2498_7096, 0.341_4628],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 27 | 0 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class Wav2Vec2Processor( ProcessorMixin ):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__( self ,feature_extractor ,tokenizer ):
        super().__init__(feature_extractor ,tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    @classmethod
    def from_pretrained( cls ,pretrained_model_name_or_path ,**kwargs ):
        try:
            return super().from_pretrained(pretrained_model_name_or_path ,**kwargs )
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                """ include a `tokenizer_class` attribute is deprecated and will be """
                """removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"""
                """ attribute to either your `config.json` or `tokenizer_config.json` """
                """file to suppress this warning: """ ,FutureWarning ,)

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path ,**kwargs )
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path ,**kwargs )

            return cls(feature_extractor=feature_extractor ,tokenizer=tokenizer )
    def __call__( self ,*args ,**kwargs ):
        # For backwards compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args ,**kwargs )

        if "raw_speech" in kwargs:
            warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
            audio = kwargs.pop("""raw_speech""" )
        else:
            audio = kwargs.pop("""audio""" ,None )
        sampling_rate = kwargs.pop("""sampling_rate""" ,None )
        text = kwargs.pop("""text""" ,None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )

        if audio is not None:
            inputs = self.feature_extractor(audio ,*args ,sampling_rate=sampling_rate ,**kwargs )
        if text is not None:
            encodings = self.tokenizer(text ,**kwargs )

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["""labels"""] = encodings["""input_ids"""]
            return inputs
    def pad( self ,*args ,**kwargs ):
        # For backwards compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args ,**kwargs )

        input_features = kwargs.pop("""input_features""" ,None )
        labels = kwargs.pop("""labels""" ,None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features ,*args ,**kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels ,**kwargs )

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["""labels"""] = labels["""input_ids"""]
            return input_features
    def batch_decode( self ,*args ,**kwargs ):
        return self.tokenizer.batch_decode(*args ,**kwargs )

    def decode( self ,*args ,**kwargs ):
        return self.tokenizer.decode(*args ,**kwargs )

    @contextmanager
    def as_target_processor( self ):
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your audio inputs, or in a separate call.""" )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
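
# A minimal usage sketch (not part of the original file). The checkpoint id and the
# 16 kHz dummy waveform below are illustrative assumptions:
#
#     import numpy as np
#     processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#     speech = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
#     batch = processor(audio=speech, sampling_rate=16_000, text="HELLO", return_tensors="np")
#     # `batch` holds "input_values" from the feature extractor and "labels" from the tokenizer.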
| 336 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    model_parameters = filter(lambda p: p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric) -> ModelCheckpoint:
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    elif metric == "loss":
        exp = '{val_avg_loss:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            ' function.' )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=f"val_{metric}" , mode='max' , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience) -> EarlyStopping:
    return EarlyStopping(
        monitor=f"val_{metric}" , mode='min' if 'loss' in metric else 'max' , patience=patience , verbose=True , )
class Seq2SeqLoggingCallback( pl.Callback ):
    def on_batch_end( self , trainer , pl_module ):
        lrs = {f"lr_group_{i}": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )

    @rank_zero_only
    def _write_logs( self , trainer , pl_module , type_path , save_generations=True ):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****" )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / 'test_results.txt'
            generations_file = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True )
            generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , 'a+' ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg )

        if not save_generations:
            return

        if "preds" in metrics:
            content = '\n'.join(metrics['preds'] )
            generations_file.open('w+' ).write(content )

    @rank_zero_only
    def on_train_start( self , trainer , pl_module ):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )

    @rank_zero_only
    def on_test_end( self , trainer , pl_module ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , 'test' )

    @rank_zero_only
    def on_validation_end( self , trainer , pl_module ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
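
# A wiring sketch (not part of the original file): attaching these callbacks to a
# pytorch_lightning Trainer; `module` stands in for whichever LightningModule is trained.
#
#     trainer = pl.Trainer(
#         callbacks=[
#             Seq2SeqLoggingCallback(),
#             get_checkpoint_callback(output_dir="outputs", metric="rouge2"),
#             get_early_stopping_callback(metric="rouge2", patience=3),
#         ],
#     )
#     trainer.fit(module)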
| 27 | 0 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION['lanczos'] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0, 3, 1, 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}""")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['eta'] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
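
# A minimal usage sketch (not part of the original file); the checkpoint id is an
# assumption — any LDM super-resolution checkpoint exposing vqvae/unet/scheduler works:
#
#     pipeline = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#     low_res = PIL.Image.open("low_res.png").convert("RGB").resize((128, 128))
#     upscaled = pipeline(image=low_res, num_inference_steps=100, eta=1.0).images[0]
#     upscaled.save("upscaled.png")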
| 387 |
def _print_dist(dist, v) -> None:
    print('\nThe shortest path matrix using Floyd Warshall algorithm\n' )
    for i in range(v ):
        for j in range(v ):
            if dist[i][j] != float('inf' ):
                print(int(dist[i][j] ) , end='\t' )
            else:
                print('INF' , end='\t' )
        print()
def floyd_warshall(graph, v):
    """
    Compute the shortest distance between every pair of the v vertices of `graph`,
    where graph[i][j] holds the edge weight (or inf when there is no edge).
    """
    dist = [[float('inf' ) for _ in range(v )] for _ in range(v )]

    for i in range(v ):
        for j in range(v ):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v ):
        # looping through rows of graph array
        for i in range(v ):
            # looping through columns of graph array
            for j in range(v ):
                if (
                    dist[i][k] != float('inf' )
                    and dist[k][j] != float('inf' )
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist , v )
    return dist, v
if __name__ == "__main__":
__A : Dict = int(input("Enter number of vertices: "))
__A : Union[str, Any] = int(input("Enter number of edges: "))
__A : List[str] = [[float("inf") for i in range(v)] for j in range(v)]
for i in range(v):
__A : List[Any] = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print("\nEdge ", i + 1)
__A : Union[str, Any] = int(input("Enter source:"))
__A : List[str] = int(input("Enter destination:"))
__A : Union[str, Any] = float(input("Enter weight:"))
__A : Any = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
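
# A non-interactive sketch (not in the original): the same 3-vertex graph as the
# transcript above, built directly instead of via input():
#     INF = float("inf")
#     demo = [[0.0, INF, INF], [INF, 0.0, 2.0], [INF, 1.0, 0.0]]
#     floyd_warshall(demo, 3)  # prints the matrix shown in the expected output above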
| 27 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
__A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 602 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
__A : Optional[int] = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('tpu-config' , description=_description )
    else:
        parser = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
    config_args.add_argument(
        '--config_file' , type=str , default=None , help='Path to the config file to use for accelerate.' , )
    config_args.add_argument(
        '--tpu_name' , default=None , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
    config_args.add_argument(
        '--tpu_zone' , default=None , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
    pod_args = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
    pod_args.add_argument(
        '--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
    pod_args.add_argument(
        '--command_file' , default=None , help='The path to the file containing the commands to run on the pod on startup.' , )
    pod_args.add_argument(
        '--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
    pod_args.add_argument(
        '--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
    pod_args.add_argument(
        '--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
    pod_args.add_argument(
        '--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
    return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = 'git+https://github.com/huggingface/accelerate.git'
    elif args.accelerate_version == "latest":
        args.accelerate_version = 'accelerate -U'
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError('You must specify either a command file or a command to run on the pod.' )

    if args.command_file:
        with open(args.command_file , 'r' ) as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['cd /usr/share']
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = '; '.join(new_cmd )

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['gcloud']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd )}" )
        return
    subprocess.run(cmd )
    print('Successfully setup pod.' )
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args )
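
# Example invocation (a sketch, not part of the original file; TPU name and zone are
# placeholders). With --debug the assembled `gcloud compute tpus tpu-vm ssh ...` command
# is printed instead of executed, which is handy for checking it first:
#
#     accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#         --command "echo hello" --install_accelerate --debug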
| 27 | 0 |
"""simple docstring"""
def permute(nums: list[int]) -> list[list[int]]:
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums):
    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
doctest.testmod()
| 224 |
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = 'nezha'

    def __init__( self , vocab_size=2_1128 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , max_relative_position=64 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , classifier_dropout=0.1 , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
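
# A quick usage sketch (not part of the original file): the config behaves like any
# PretrainedConfig subclass.
#
#     config = NezhaConfig(hidden_size=512)
#     assert config.hidden_size == 512 and config.model_type == "nezha"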
| 27 | 0 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the simultaneous equations matrix * x = vector by Gaussian elimination."""
    size = len(matrix)
    augmented = [[0 for _ in range(size + 1)] for _ in range(size)]

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]

        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(col, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Return a polynomial function interpolating the points (1, y0), (2, y1), ..."""
    size = len(y_list)
    matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)

        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
    """The generating function u(n) given in Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms (FITs) of the bad optimum polynomials (BOPs)."""
    data_points = [func(x_val) for x_val in range(1, order + 1)]

    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1

        ret += poly(x_val)

    return ret
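
# Sanity sketch (not in the original): from the problem statement, for u(n) = n**3 the
# first incorrect terms are 1, 15 and 58, so:
#     assert solution(lambda n: n**3, 3) == 74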
if __name__ == "__main__":
    print(F'''{solution() = }''')

| 25 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """
    Count how many tile amounts t <= t_limit form between 1 and 10 distinct
    hollow square laminae (Project Euler problem 174).
    """
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1

        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f"{solution() = }")
| 27 | 0 |
'''simple docstring'''
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader, description='Evaluation', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader, description='Prediction', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, 'predict')
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
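
# A wiring sketch (not part of the original file); model, args, datasets and the
# post-processing/metric functions come from a run_qa.py-style script and are
# assumptions here:
#
#     trainer = QuestionAnsweringTrainer(
#         model=model,
#         args=training_args,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,
#         post_process_function=post_processing_function,
#         compute_metrics=compute_metrics,
#     )
#     metrics = trainer.evaluate()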
| 405 |
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('surface_area_cube() only accepts non-negative values' )
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError('surface_area_cuboid() only accepts non-negative values' )
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError('surface_area_sphere() only accepts non-negative values' )
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cone() only accepts non-negative values' )
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            'surface_area_conical_frustum() only accepts non-negative values' )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cylinder() only accepts non-negative values' )
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('surface_area_torus() only accepts non-negative values' )
    if torus_radius < tube_radius:
        raise ValueError(
            'surface_area_torus() does not support spindle or self intersecting tori' )
    return 4 * pow(pi , 2 ) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError('area_rectangle() only accepts non-negative values' )
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('area_square() only accepts non-negative values' )
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('area_triangle() only accepts non-negative values' )
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError('Given three sides do not form a triangle' )
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3) )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('area_parallelogram() only accepts non-negative values' )
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values' )
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError('area_circle() only accepts non-negative values' )
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError('area_ellipse() only accepts non-negative values' )
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values' )
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides , int ) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as \
length of a side' )
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 27 | 0 |
'''simple docstring'''
def count_inversions_bf(arr):
    """Count inversions with a naive O(n^2) double loop."""
    num_inversions = 0
    n = len(arr )

    for i in range(n - 1 ):
        for j in range(i + 1 , n ):
            if arr[i] > arr[j]:
                num_inversions += 1

    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions by divide and conquer; returns (sorted_arr, num_inversions)."""
    if len(arr ) <= 1:
        return arr, 0
    mid = len(arr ) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p )
    b, inversions_q = count_inversions_recursive(q )
    c, cross_inversions = _count_cross_inversions(a , b )

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    r = []
    i = j = num_inversion = 0
    while i < len(p ) and j < len(q ):
        if p[i] > q[j]:
            # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p ) - i
            r.append(q[j] )
            j += 1
        else:
            r.append(p[i] )
            i += 1

    if i < len(p ):
        r.extend(p[i:] )
    else:
        r.extend(q[j:] )

    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1 )
    _, num_inversions_recursive = count_inversions_recursive(arr_1 )

    assert num_inversions_bf == num_inversions_recursive == 8

    print("number of inversions = " , num_inversions_bf )

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1 )
    _, num_inversions_recursive = count_inversions_recursive(arr_1 )

    assert num_inversions_bf == num_inversions_recursive == 0

    print("number of inversions = " , num_inversions_bf )

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1 )
    _, num_inversions_recursive = count_inversions_recursive(arr_1 )

    assert num_inversions_bf == num_inversions_recursive == 0

    print("number of inversions = " , num_inversions_bf )
if __name__ == "__main__":
main()
| 18 |
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implements tanh through the identity tanh(x) = 2 / (1 + e^(-2x)) - 1."""
    return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
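    # Equivalence sketch (not in the original): the identity 2/(1 + e^(-2x)) - 1 equals
    # numpy's built-in tanh for arbitrary inputs.
    sample = np.array([1.0, 5.0, -0.67])
    assert np.allclose(tangent_hyperbolic(sample), np.tanh(sample))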
| 27 | 0 |
def kth_permutation(k: int, n: int) -> list[int]:
    """
    Return the k-th (0-indexed) lexicographic permutation of range(n), built with the
    factorial number system. The (k, n) parameter order is reconstructed from usage.
    """
    factorials = [1]
    for i in range(2 , n ):
        factorials.append(factorials[-1] * i )
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n ) )

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k , factorial )
        permutation.append(elements[number] )
        elements.remove(elements[number] )
    permutation.append(elements[0] )

    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
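    # Usage sketch (not in the original): with the (k, n) order assumed above, the 0th
    # permutation of range(3) is [0, 1, 2] and the last (k = 3! - 1 = 5) is [2, 1, 0].
    assert kth_permutation(0, 3) == [0, 1, 2]
    assert kth_permutation(5, 3) == [2, 1, 0]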
| 202 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
__A : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 27 | 0 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--onnx_model_path""",
default=None,
type=str,
required=True,
help="""Path to ONNX model: """,
)
parser.add_argument(
"""--output_dir""",
default=None,
type=str,
required=True,
help="""The output directory where the model checkpoints and predictions will be written.""",
)
# Other parameters
parser.add_argument(
"""--tokenizer_name""",
default="""""",
type=str,
required=True,
help="""Pretrained tokenizer name or path if not the same as model_name""",
)
parser.add_argument(
"""--version_2_with_negative""",
action="""store_true""",
help="""If true, the SQuAD examples contain some that do not have an answer.""",
)
parser.add_argument(
"""--null_score_diff_threshold""",
type=float,
default=0.0,
help="""If null_score - best_non_null is greater than the threshold predict null.""",
)
parser.add_argument(
"""--max_seq_length""",
default=384,
type=int,
help=(
"""The maximum total input sequence length after WordPiece tokenization. Sequences """
"""longer than this will be truncated, and sequences shorter than this will be padded."""
),
)
parser.add_argument(
"""--doc_stride""",
default=128,
type=int,
help="""When splitting up a long document into chunks, how much stride to take between chunks.""",
)
parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""")
parser.add_argument(
"""--n_best_size""",
default=20,
type=int,
help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""",
)
parser.add_argument(
"""--max_answer_length""",
default=30,
type=int,
help=(
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
),
)
parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""")
parser.add_argument(
"""--dataset_name""",
type=str,
default=None,
required=True,
help="""The name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--dataset_config_name""",
type=str,
default=None,
help="""The configuration name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--preprocessing_num_workers""", type=int, default=4, help="""A csv or a json file containing the training data."""
)
parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""")
parser.add_argument(
"""--fp16""",
action="""store_true""",
help="""Whether to use 16-bit (mixed) precision instead of 32-bit""",
)
parser.add_argument(
"""--int8""",
action="""store_true""",
help="""Whether to use INT8""",
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name."""
)
logger.info("""Training/evaluation parameters %s""", args)
args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("""temp_engine"""):
os.makedirs("""temp_engine""")
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, """rb""") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, """wb""") as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    # int32 host buffers for the three BERT inputs
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
        f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some questions have lots of whitespace on the left; strip it so the question
    # does not eat into the context budget during truncation.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
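# Illustration (not part of the original script): with a long context, truncation with a
# stride produces several overlapping features per example; e.g. max_seq_length=384 and
# doc_stride=128 means consecutive features share 128 context tokens, and
# "overflow_to_sample_mapping" records which original example each feature came from.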
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: match the start and end logits back to answer spans in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffers
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
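    # Illustration (assumption mirroring the allocations above): the engine is expected to
    # order its bindings as 0: input_ids, 1: attention_mask, 2: token_type_ids,
    # 3: start logits, 4: end logits, so get_binding_shape(3)/(4) are the two output shapes.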
    # Evaluation
    logger.info("***** Running Evaluation *****")
    logger.info(f"  Num examples = {len(eval_dataset)}")
    logger.info(f"  Batch size = {args.per_device_eval_batch_size}")

    total_time = 0.0  # cumulative TRT inference time, in seconds
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f"Evaluation metrics: {eval_metric}")
| 451 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__A : List[Any] = "http://www.mocksite.com/file1.txt"
__A : List[Any] = "\"text\": [\"foo\", \"foo\"]"
__A : Dict = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class lowerCamelCase:
'''simple docstring'''
__magic_name__ = 200
__magic_name__ = {'Content-Length': '100'}
__magic_name__ = {}
def lowerCAmelCase__ ( self , **snake_case_ ):
return [bytes(snake_case_ , 'utf-8' )]
def __lowerCAmelCase( *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    download_config = DownloadConfig(
        cache_dir=os.path.join(tmp_path, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 27 | 0 |
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 388 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True if `number` is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions x, y, z and return the reduced numerator and denominator."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
| 27 | 0 |
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 492 |
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes up to and including `num`."""
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
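# Example (illustrative): prime_sieve(25) returns [2, 3, 5, 7, 11, 13, 17, 19, 23];
# only 2, 3 and 5 are collected inside the while-loop (start <= sqrt(25)), the remaining
# primes come from the final scan over the entries the sieve never crossed out.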
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| 27 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"
}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 336 |
__A : Dict = "Alexander Joslin"
import operator as op
from .stack import Stack
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
_A = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
_A = Stack()
_A = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(_SCREAMING_SNAKE_CASE ) )
elif i in operators:
# RULE 2
operator_stack.push(_SCREAMING_SNAKE_CASE )
elif i == ")":
# RULE 4
_A = operator_stack.peek()
operator_stack.pop()
_A = operand_stack.peek()
operand_stack.pop()
_A = operand_stack.peek()
operand_stack.pop()
_A = operators[opr](_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
operand_stack.push(_SCREAMING_SNAKE_CASE )
# RULE 5
return operand_stack.peek()
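# Worked trace (illustrative) for "(5 + ((4 * 2) * (2 + 3)))":
# the first two ")" reduce 4*2 -> 8 and 2+3 -> 5, the next ")" reduces 8*5 -> 40,
# and the outermost ")" reduces 5+40 -> 45, which is what the final peek() returns.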
if __name__ == "__main__":
__A : Any = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 27 | 0 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count the ordered ways to reach `target` by summing items of `array` (items reusable)."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
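# Worked example (illustrative): for array=[1, 2, 5] and target=5, the recurrence
# f(t) = sum(f(t - item) for item in array) with f(0) = 1 gives
# f(1)=1, f(2)=2, f(3)=3, f(4)=5, f(5)=f(4)+f(3)+f(0)=9 ordered combinations.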
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Top-down variant of combination_sum_iv with memoization in dp_array."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Bottom-up variant: dp_array[i] counts the ways to form the sum i."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 387 |
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 27 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 602 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 27 | 0 |
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 224 |
def check_bouncy(n: int) -> bool:
    """Return True if `n` is bouncy (its digits are neither increasing nor decreasing)."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
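# Examples (illustrative): 134468 is increasing and 66420 is decreasing, so neither is
# bouncy; 155349 is neither increasing nor decreasing, so check_bouncy(155349) is True.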
def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
| 27 | 0 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 25 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price with the given tax rate applied."""
    return price * (1 + tax_rate)
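# Worked arithmetic (illustrative): price_plus_tax(100, 0.25) = 100 * (1 + 0.25) = 125.0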
if __name__ == "__main__":
print(f"{price_plus_tax(100, 0.2_5) = }")
print(f"{price_plus_tax(1_2_5.5_0, 0.0_5) = }")
| 27 | 0 |
def merge_sort(collection: list) -> list:
    """Pure implementation of merge sort."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid_index = len(collection) // 2
    return merge(merge_sort(collection[:mid_index]), merge_sort(collection[mid_index:]))
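# Example (illustrative): merge_sort([0, 5, 3, 2, 2]) splits into [0, 5] and [3, 2, 2],
# sorts each half recursively, then _merge interleaves them into [0, 2, 2, 3, 5].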
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Tuple = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase : Tuple = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=",")
| 405 |
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
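# Worked check (illustrative): f(1) = -6 and f(1000) > 0, so f changes sign on [1, 1000]
# and bisection(f, 1, 1000) converges to the real root x ≈ 2.0945515 of x**3 - 2*x - 5.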
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
| 27 | 0 |
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
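# Example CLI usage (illustrative; the script name is hypothetical and `utils` must be the
# local examples helper module providing calculate_rouge/save_json):
#   python calculate_rouge.py predictions.txt targets.txt --save_path rouge_scores.json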
| 18 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
| 27 | 0 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : str = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {
"google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
"google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
"google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class a ( __snake_case ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''owlvit_text_model'''
def __init__( self , _lowerCAmelCase=49408 , _lowerCAmelCase=512 , _lowerCAmelCase=2048 , _lowerCAmelCase=12 , _lowerCAmelCase=8 , _lowerCAmelCase=16 , _lowerCAmelCase="quick_gelu" , _lowerCAmelCase=1e-5 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1.0 , _lowerCAmelCase=0 , _lowerCAmelCase=49406 , _lowerCAmelCase=49407 , **_lowerCAmelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
__SCREAMING_SNAKE_CASE: Any = vocab_size
__SCREAMING_SNAKE_CASE: List[Any] = hidden_size
__SCREAMING_SNAKE_CASE: Any = intermediate_size
__SCREAMING_SNAKE_CASE: Union[str, Any] = num_hidden_layers
__SCREAMING_SNAKE_CASE: Any = num_attention_heads
__SCREAMING_SNAKE_CASE: Union[str, Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE: Union[str, Any] = hidden_act
__SCREAMING_SNAKE_CASE: int = layer_norm_eps
__SCREAMING_SNAKE_CASE: Optional[Any] = attention_dropout
__SCREAMING_SNAKE_CASE: Tuple = initializer_range
__SCREAMING_SNAKE_CASE: Optional[int] = initializer_factor
@classmethod
def snake_case_ ( cls , _lowerCAmelCase , **_lowerCAmelCase ):
"""simple docstring"""
cls._set_token_in_kwargs(snake_case_ )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: List[Any] = cls.get_config_dict(snake_case_ , **snake_case_ )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
__SCREAMING_SNAKE_CASE: Any = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case_ , **snake_case_ )
class OwlViTVisionConfig(PretrainedConfig ):
    model_type = '''owlvit_vision_model'''
def __init__( self , _lowerCAmelCase=768 , _lowerCAmelCase=3072 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3 , _lowerCAmelCase=768 , _lowerCAmelCase=32 , _lowerCAmelCase="quick_gelu" , _lowerCAmelCase=1e-5 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1.0 , **_lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**snake_case_ )
__SCREAMING_SNAKE_CASE: int = hidden_size
__SCREAMING_SNAKE_CASE: Any = intermediate_size
__SCREAMING_SNAKE_CASE: Any = num_hidden_layers
__SCREAMING_SNAKE_CASE: Tuple = num_attention_heads
__SCREAMING_SNAKE_CASE: Tuple = num_channels
__SCREAMING_SNAKE_CASE: Any = image_size
__SCREAMING_SNAKE_CASE: List[str] = patch_size
__SCREAMING_SNAKE_CASE: str = hidden_act
__SCREAMING_SNAKE_CASE: List[Any] = layer_norm_eps
__SCREAMING_SNAKE_CASE: str = attention_dropout
__SCREAMING_SNAKE_CASE: Dict = initializer_range
__SCREAMING_SNAKE_CASE: str = initializer_factor
@classmethod
def snake_case_ ( cls , _lowerCAmelCase , **_lowerCAmelCase ):
"""simple docstring"""
cls._set_token_in_kwargs(snake_case_ )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: List[Any] = cls.get_config_dict(snake_case_ , **snake_case_ )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
__SCREAMING_SNAKE_CASE: int = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case_ , **snake_case_ )
class OwlViTConfig(PretrainedConfig ):
    model_type = '''owlvit'''
    is_composition = True
def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=512 , _lowerCAmelCase=2.6592 , _lowerCAmelCase=True , **_lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**snake_case_ )
if text_config is None:
__SCREAMING_SNAKE_CASE: Dict = {}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
if vision_config is None:
__SCREAMING_SNAKE_CASE: int = {}
logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' )
__SCREAMING_SNAKE_CASE: Union[str, Any] = OwlViTTextConfig(**snake_case_ )
__SCREAMING_SNAKE_CASE: Optional[int] = OwlViTVisionConfig(**snake_case_ )
__SCREAMING_SNAKE_CASE: Any = projection_dim
__SCREAMING_SNAKE_CASE: Tuple = logit_scale_init_value
__SCREAMING_SNAKE_CASE: List[Any] = return_dict
__SCREAMING_SNAKE_CASE: Dict = 1.0
@classmethod
def snake_case_ ( cls , _lowerCAmelCase , **_lowerCAmelCase ):
"""simple docstring"""
cls._set_token_in_kwargs(snake_case_ )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: List[Any] = cls.get_config_dict(snake_case_ , **snake_case_ )
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case_ , **snake_case_ )
@classmethod
def snake_case_ ( cls , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Any = {}
__SCREAMING_SNAKE_CASE: int = text_config
__SCREAMING_SNAKE_CASE: Dict = vision_config
return cls.from_dict(snake_case_ , **snake_case_ )
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: List[Any] = copy.deepcopy(self.__dict__ )
__SCREAMING_SNAKE_CASE: Dict = self.text_config.to_dict()
__SCREAMING_SNAKE_CASE: int = self.vision_config.to_dict()
__SCREAMING_SNAKE_CASE: Union[str, Any] = self.__class__.model_type
return output
class OwlViTOnnxConfig(OnnxConfig ):
@property
def snake_case_ ( self ):
"""simple docstring"""
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
] )
@property
def snake_case_ ( self ):
"""simple docstring"""
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
] )
@property
def snake_case_ ( self ):
"""simple docstring"""
return 1e-4
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = None , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Tuple = super().generate_dummy_inputs(
processor.tokenizer , batch_size=snake_case_ , seq_length=snake_case_ , framework=snake_case_ )
__SCREAMING_SNAKE_CASE: Union[str, Any] = super().generate_dummy_inputs(
processor.image_processor , batch_size=snake_case_ , framework=snake_case_ )
return {**text_input_dict, **image_input_dict}
@property
def snake_case_ ( self ):
"""simple docstring"""
return 14
| 202 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 27 | 0 |
'''simple docstring'''
def perfect_cube(n: int ) ->bool:
    # round() guards against floating-point error in the real cube root
    val = round(n ** (1 / 3) )
    return (val * val * val) == n
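# Example (illustrative): perfect_cube(27) is True because round(27 ** (1 / 3)) == 3
# and 3 * 3 * 3 == 27, while perfect_cube(4) is False since round(4 ** (1 / 3)) == 2.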
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 451 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__A : List[Any] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg , hint=None ):
    """simple docstring"""
    require_version(deps[pkg] , hint )
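# Example (illustrative): dep_version_check("tqdm") re-checks the installed tqdm
# against the pin recorded in dependency_versions_table, raising if it does not match.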
| 27 | 0 |
"""simple docstring"""
from itertools import permutations
def is_substring_divisible( num: tuple ) -> bool:
"""simple docstring"""
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
    tests = (7, 1_1, 1_3, 1_7)
    for i, test in enumerate(tests ):
        if (num[i + 4] * 1_0_0 + num[i + 5] * 1_0 + num[i + 6]) % test != 0:
            return False
    return True
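# Worked example (illustrative): num = (1, 4, 0, 6, 3, 5, 7, 2, 8, 9), i.e. 1406357289:
# d4 = 6 is even, d3 + d4 + d5 = 9 is divisible by 3, d6 = 5 by 5, and the windows
# 357, 572, 728, 289 are divisible by 7, 11, 13, 17 respectively, so it qualifies.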
def solution( n: int = 1_0 ) -> int:
    """simple docstring"""
    return sum(
        int("".join(map(str, num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )
if __name__ == "__main__":
print(f'{solution() = }')
| 388 |
def nand_gate( input_a: int , input_b: int ) -> int:
    """simple docstring"""
    return int((input_a, input_b).count(0 ) != 0 )
def test_nand_gate( ) -> None:
"""simple docstring"""
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 27 | 0 |
def check_bouncy( n: int ) -> bool:
    if not isinstance(n , int ):
        raise ValueError('''check_bouncy() accepts only integer arguments''' )
    str_n = str(n )
    sorted_str_n = ''.join(sorted(str_n ) )
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
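# Example (illustrative): 538 is bouncy (5 > 3 but 3 < 8); 1349 (increasing) and
# 9863 (decreasing) are not, since sorting leaves them or their reverse unchanged.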
def solution( percent: float = 9_9 ) -> int:
    if not 0 < percent < 1_0_0:
        raise ValueError('''solution() only accepts values from 0 to 100''' )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 1_0_0 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"{solution(99)}")
| 492 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
'''simple docstring'''
def __init__( self , snake_case_ , ):
_A = parent
_A = 13
_A = 7
_A = True
_A = True
_A = True
_A = 99
_A = 32
_A = 2
_A = 4
_A = 37
_A = 'gelu'
_A = 0.1
_A = 0.1
_A = 512
_A = 16
_A = 2
_A = 0.02
_A = 3
_A = 4
_A = None
def lowerCAmelCase__ ( self ):
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = random_attention_mask([self.batch_size, self.seq_length] )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self ):
(
(
_A
), (
_A
), (
_A
), (
_A
), (
_A
), (
_A
),
) = self.prepare_config_and_inputs()
_A = True
_A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = TFEsmModel(config=snake_case_ )
_A = {'input_ids': input_ids, 'attention_mask': input_mask}
_A = model(snake_case_ )
_A = [input_ids, input_mask]
_A = model(snake_case_ )
_A = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_A = True
_A = TFEsmModel(config=snake_case_ )
_A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
_A = model(snake_case_ )
_A = [input_ids, input_mask]
_A = model(snake_case_ , encoder_hidden_states=snake_case_ )
# Also check the case where encoder outputs are not passed
_A = model(snake_case_ , attention_mask=snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = TFEsmForMaskedLM(config=snake_case_ )
_A = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_A = self.num_labels
_A = TFEsmForTokenClassification(config=snake_case_ )
_A = {'input_ids': input_ids, 'attention_mask': input_mask}
_A = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase__ ( self ):
_A = self.prepare_config_and_inputs()
(
(
_A
), (
_A
), (
_A
), (
_A
), (
_A
), (
_A
),
) = config_and_inputs
_A = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class TFEsmModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
def lowerCAmelCase__ ( self ):
_A = TFEsmModelTester(self )
_A = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def lowerCAmelCase__ ( self ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
@slow
def lowerCAmelCase__ ( self ):
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = TFEsmModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@unittest.skip('Protein models do not support embedding resizing.' )
def lowerCAmelCase__ ( self ):
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def lowerCAmelCase__ ( self ):
pass
def lowerCAmelCase__ ( self ):
_A, _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(snake_case_ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
_A = model.get_bias()
assert isinstance(snake_case_ , snake_case_ )
for k, v in name.items():
assert isinstance(snake_case_ , tf.Variable )
else:
_A = model.get_output_embeddings()
assert x is None
_A = model.get_bias()
assert name is None
@require_tf
class TFEsmModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase__ ( self ):
_A = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
_A = tf.constant([[0, 1, 2, 3, 4, 5]] )
_A = model(snake_case_ )[0]
_A = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , snake_case_ )
# compare the actual values for a slice.
_A = tf.constant(
[
[
[8.92_1518, -10.58_9814, -6.467_1307],
[-6.396_7156, -13.91_1377, -1.121_1915],
[-7.78_1247, -13.95_1557, -3.74_0592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def lowerCAmelCase__ ( self ):
_A = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
_A = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
_A = model(snake_case_ )[0]
# compare the actual values for a slice.
_A = tf.constant(
[
[
[0.1444_3092, 0.5412_5327, 0.324_7739],
[0.3034_0484, 0.0052_6676, 0.3107_7722],
[0.3227_8043, -0.2498_7096, 0.341_4628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 27 | 0 |
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution( taken: int = 20 ) -> str:
    total = math.comb(NUM_BALLS , taken )
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , taken )
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"
if __name__ == "__main__":
print(solution(20))
| 336 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ) -> int:
    """simple docstring"""
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
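# Example (illustrative): for a model containing a single nn.Linear(10, 5),
# count_trainable_parameters returns 10 * 5 + 5 = 55 (weight entries plus biases).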
logger = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir , metric ) -> ModelCheckpoint:
    """simple docstring"""
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    elif metric == "loss":
        exp = '{val_avg_loss:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            F"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            ' function.' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=F"val_{metric}" , mode='max' , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback( metric , patience ) -> EarlyStopping:
    """simple docstring"""
    return EarlyStopping(
        monitor=F"val_{metric}" , mode='min' if 'loss' in metric else 'max' , patience=patience , verbose=True , )
class Seq2SeqLoggingCallback( pl.Callback ):
'''simple docstring'''
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ ):
_A = {F"lr_group_{i}": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(snake_case_ )
@rank_zero_only
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=True ):
logger.info(F"***** {type_path} results at step {trainer.global_step:05d} *****" )
_A = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
_A = Path(pl_module.hparams.output_dir )
if type_path == "test":
_A = od / 'test_results.txt'
_A = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_A = od / F"{type_path}_results/{trainer.global_step:05d}.txt"
_A = od / F"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=snake_case_ )
generations_file.parent.mkdir(exist_ok=snake_case_ )
with open(snake_case_ , 'a+' ) as writer:
for key in sorted(snake_case_ ):
if key in ["log", "progress_bar", "preds"]:
continue
_A = metrics[key]
if isinstance(snake_case_ , torch.Tensor ):
_A = val.item()
_A = F"{key}: {val:.6f}\n"
writer.write(snake_case_ )
if not save_generations:
return
if "preds" in metrics:
_A = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(snake_case_ )
@rank_zero_only
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ ):
try:
_A = pl_module.model.model.num_parameters()
except AttributeError:
_A = pl_module.model.num_parameters()
_A = count_trainable_parameters(snake_case_ )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
@rank_zero_only
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(snake_case_ , snake_case_ , 'test' )
@rank_zero_only
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 27 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
UpperCamelCase__ : Any = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Optional[Any] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 387 |
def _print_dist( dist , v ) -> None:
    """simple docstring"""
    print('\nThe shortest path matrix using Floyd Warshall algorithm\n' )
    for i in range(v ):
        for j in range(v ):
            if dist[i][j] != float('inf' ):
                print(int(dist[i][j] ) , end='\t' )
            else:
                print('INF' , end='\t' )
        print()
def floyd_warshall( graph , v ):
    """simple docstring"""
    dist = [[float('inf' ) for _ in range(v )] for _ in range(v )]
    for i in range(v ):
        for j in range(v ):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v ):
        # looping through rows of graph array
        for i in range(v ):
            # looping through columns of graph array
            for j in range(v ):
                if (
                    dist[i][k] != float('inf' )
                    and dist[k][j] != float('inf' )
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist , v )
    return dist, v
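# Worked example (illustrative): for the 3-vertex driver input documented below,
# the only finite off-diagonal entries are dist[1][2] = 2 and dist[2][1] = 1, and
# no intermediate vertex k improves them, matching the expected output comments.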
if __name__ == "__main__":
__A : Dict = int(input("Enter number of vertices: "))
__A : Union[str, Any] = int(input("Enter number of edges: "))
__A : List[str] = [[float("inf") for i in range(v)] for j in range(v)]
for i in range(v):
__A : List[Any] = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print("\nEdge ", i + 1)
__A : Union[str, Any] = int(input("Enter source:"))
__A : List[str] = int(input("Enter destination:"))
__A : Union[str, Any] = float(input("Enter weight:"))
__A : Any = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 27 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester :
"""simple docstring"""
def __init__( self : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any]=1_3 , __UpperCamelCase : List[Any]=7 , __UpperCamelCase : Tuple=True , __UpperCamelCase : str=True , __UpperCamelCase : int=True , __UpperCamelCase : Optional[Any]=9_9 , __UpperCamelCase : List[Any]=3_2 , __UpperCamelCase : List[str]=5 , __UpperCamelCase : Dict=4 , __UpperCamelCase : List[Any]=3_7 , __UpperCamelCase : Union[str, Any]="gelu" , __UpperCamelCase : int=0.1 , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : Any=5_1_2 , __UpperCamelCase : List[str]=1_6 , __UpperCamelCase : str=2 , __UpperCamelCase : Dict=0.0_2 , __UpperCamelCase : Dict=3 , __UpperCamelCase : str=4 , __UpperCamelCase : List[str]=None , )->List[Any]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = self.vocab_size - 1
def lowercase__ ( self : Optional[Any] )->Optional[int]:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
_UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , *__UpperCamelCase : Dict )->Optional[Any]:
_UpperCAmelCase = OpenAIGPTModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(snake_case_ , token_type_ids=snake_case_ , head_mask=snake_case_ )
_UpperCAmelCase = model(snake_case_ , token_type_ids=snake_case_ )
_UpperCAmelCase = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : str , __UpperCamelCase : int , __UpperCamelCase : Tuple , *__UpperCamelCase : List[str] )->Union[str, Any]:
_UpperCAmelCase = OpenAIGPTLMHeadModel(snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : str , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , *__UpperCamelCase : List[str] )->Optional[Any]:
_UpperCAmelCase = OpenAIGPTDoubleHeadsModel(snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Dict , __UpperCamelCase : int , __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : str , *__UpperCamelCase : List[Any] )->Any:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = OpenAIGPTForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = model(snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Optional[Any] )->Optional[Any]:
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class OpenAIGPTModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowercase__ ( self : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict )->Dict:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def lowercase__ ( self : int , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any]=False )->List[Any]:
_UpperCAmelCase = super()._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=snake_case_ , )
_UpperCAmelCase = inputs_dict['''labels''']
_UpperCAmelCase = inputs_dict['''labels''']
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=snake_case_ , )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case_ )
return inputs_dict
def lowercase__ ( self : Tuple )->Any:
_UpperCAmelCase = OpenAIGPTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , n_embd=3_7 )
def lowercase__ ( self : Optional[int] )->int:
self.config_tester.run_common_tests()
def lowercase__ ( self : int )->Union[str, Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*snake_case_ )
def lowercase__ ( self : Dict )->List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*snake_case_ )
def lowercase__ ( self : Tuple )->Dict:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*snake_case_ )
def lowercase__ ( self : Dict )->Dict:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*snake_case_ )
@slow
def lowercase__ ( self : int )->Optional[Any]:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = OpenAIGPTModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_torch
class OpenAIGPTModelLanguageGenerationTest ( unittest.TestCase):
"""simple docstring"""
@slow
def lowercase__ ( self : List[Any] )->Optional[int]:
_UpperCAmelCase = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(snake_case_ )
_UpperCAmelCase = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=snake_case_ ) # the president is
_UpperCAmelCase = [
4_8_1,
4_7_3_5,
5_4_4,
2_4_6,
9_6_3,
8_7_0,
7_6_2,
2_3_9,
2_4_4,
4_0_4_7_7,
2_4_4,
2_4_9,
7_1_9,
8_8_1,
4_8_7,
5_4_4,
2_4_0,
2_4_4,
6_0_3,
4_8_1,
] # the president is a very good man. " \n " i\'m sure he is, " said the
_UpperCAmelCase = model.generate(snake_case_ , do_sample=snake_case_ )
self.assertListEqual(output_ids[0].tolist() , snake_case_ )
| 602 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
__A : Optional[int] = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser( subparsers=None ):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser('tpu-config' , description=_description )
    else:
        parser = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
    config_args.add_argument(
        '--config_file' , type=str , default=None , help='Path to the config file to use for accelerate.' , )
    config_args.add_argument(
        '--tpu_name' , default=None , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
    config_args.add_argument(
        '--tpu_zone' , default=None , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
    pod_args = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
    pod_args.add_argument(
        '--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
    pod_args.add_argument(
        '--command_file' , default=None , help='The path to the file containing the commands to run on the pod on startup.' , )
    pod_args.add_argument(
        '--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
    pod_args.add_argument(
        '--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
    pod_args.add_argument(
        '--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
    pod_args.add_argument(
        '--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
    return parser
def tpu_command_launcher( args ):
    """simple docstring"""
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = 'git+https://github.com/huggingface/accelerate.git'
    elif args.accelerate_version == "latest":
        args.accelerate_version = 'accelerate -U'
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = F"accelerate=={args.accelerate_version}"
    if not args.command_file and not args.command:
        raise ValueError('You must specify either a command file or a command to run on the pod.' )
    if args.command_file:
        with open(args.command_file , 'r' ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['cd /usr/share']
    if args.install_accelerate:
        new_cmd += [F"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = '; '.join(new_cmd )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['gcloud']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(F"Running {' '.join(cmd )}" )
        return
    subprocess.run(cmd )
    print('Successfully setup pod.' )
def main( ):
    """simple docstring"""
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )
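# Example (illustrative; the TPU name and zone below are made up):
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a --command "pip list" --debug
# prints the gcloud ssh command that would run "cd /usr/share; pip list" on every worker.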
| 27 | 0 |
"""simple docstring"""
def find_minimum_change( denominations : list[int] , value : str ) -> list[int]:
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denominations
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append to the "answer" array
    return answer
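# Example (illustrative): find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
# returns [500, 100, 100, 100, 100, 50, 20, 10, 5, 2], a greedy change-making result.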
# Driver Code
if __name__ == "__main__":
UpperCAmelCase__ = []
UpperCAmelCase__ = "0"
if (
input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
== "y"
):
UpperCAmelCase__ = int(input('Enter the number of denominations you want to add: ').strip())
for i in range(0, n):
denominations.append(int(input(F"Denomination {i}: ").strip()))
UpperCAmelCase__ = input('Enter the change you want to make in Indian Currency: ').strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCAmelCase__ = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
UpperCAmelCase__ = input('Enter the change you want to make: ').strip()
if int(value) == 0 or int(value) < 0:
print('The total value cannot be zero or negative.')
else:
print(F"Following is minimal change for {value}: ")
UpperCAmelCase__ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=' ')
| 224 |
from ...configuration_utils import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class NezhaConfig( PretrainedConfig ):
    '''simple docstring'''
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = 'nezha'
def __init__( self , snake_case_=2_1128 , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3072 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=64 , snake_case_=2 , snake_case_=0.02 , snake_case_=1E-12 , snake_case_=0.1 , snake_case_=0 , snake_case_=2 , snake_case_=3 , snake_case_=True , **snake_case_ , ):
super().__init__(pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = hidden_act
_A = intermediate_size
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = max_relative_position
_A = type_vocab_size
_A = initializer_range
_A = layer_norm_eps
_A = classifier_dropout
_A = use_cache
| 27 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
a_ = logging.get_logger(__name__)
class ClapFeatureExtractor ( SequenceFeatureExtractor ):
    '''simple docstring'''
    model_input_names =['input_features', 'is_longer']
def __init__( self : Union[str, Any] , a : Any=64 , a : Optional[int]=4_8000 , a : Optional[int]=480 , a : Union[str, Any]=10 , a : Any=1024 , a : Tuple=0.0 , a : List[Any]=False , a : List[Any] = 0 , a : Any = 1_4000 , a : List[str] = None , a : Tuple = "fusion" , a : List[Any] = "repeatpad" , **a : Any , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
feature_size=snake_case_ , sampling_rate=snake_case_ , padding_value=snake_case_ , return_attention_mask=snake_case_ , **snake_case_ , )
SCREAMING_SNAKE_CASE : Optional[Any] = top_db
SCREAMING_SNAKE_CASE : str = truncation
SCREAMING_SNAKE_CASE : int = padding
SCREAMING_SNAKE_CASE : str = fft_window_size
SCREAMING_SNAKE_CASE : str = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE : Union[str, Any] = hop_length
SCREAMING_SNAKE_CASE : Tuple = max_length_s
SCREAMING_SNAKE_CASE : Optional[Any] = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE : str = sampling_rate
SCREAMING_SNAKE_CASE : List[Any] = frequency_min
SCREAMING_SNAKE_CASE : Optional[Any] = frequency_max
SCREAMING_SNAKE_CASE : str = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case_ , min_frequency=snake_case_ , max_frequency=snake_case_ , sampling_rate=snake_case_ , norm=snake_case_ , mel_scale="htk" , )
SCREAMING_SNAKE_CASE : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case_ , min_frequency=snake_case_ , max_frequency=snake_case_ , sampling_rate=snake_case_ , norm="slaney" , mel_scale="slaney" , )
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : int = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __UpperCamelCase ( self : List[str] , a : List[Any] , a : List[Any] = None ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = spectrogram(
snake_case_ , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=snake_case_ , log_mel="dB" , )
return log_mel_spectrogram.T
def __UpperCamelCase ( self : Dict , a : Dict , a : str , a : List[str] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : Dict = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE : Dict = np.random.choice(ranges[0] )
SCREAMING_SNAKE_CASE : Optional[int] = np.random.choice(ranges[1] )
SCREAMING_SNAKE_CASE : List[str] = np.random.choice(ranges[2] )
SCREAMING_SNAKE_CASE : Optional[Any] = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE : List[str] = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE : Any = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(mel[None, None, :] )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.nn.functional.interpolate(
snake_case_ , size=[chunk_frames, 64] , mode="bilinear" , align_corners=snake_case_ )
SCREAMING_SNAKE_CASE : Any = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE : Dict = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __UpperCamelCase ( self : List[Any] , a : Tuple , a : List[str] , a : Union[str, Any] , a : Dict ) -> Dict:
"""simple docstring"""
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE : Union[str, Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE : List[str] = len(snake_case_ ) - max_length
SCREAMING_SNAKE_CASE : int = np.random.randint(0 , overflow + 1 )
SCREAMING_SNAKE_CASE : Tuple = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE : str = self._np_extract_fbank_features(snake_case_ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE : int = self._np_extract_fbank_features(snake_case_ , self.mel_filters )
SCREAMING_SNAKE_CASE : int = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
SCREAMING_SNAKE_CASE : List[Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE : Dict = np.stack([mel, mel, mel, mel] , axis=0 )
SCREAMING_SNAKE_CASE : Tuple = False
else:
SCREAMING_SNAKE_CASE : List[Any] = self._random_mel_fusion(snake_case_ , snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE : int = True
else:
raise NotImplementedError(F"data_truncating {truncation} not implemented" )
else:
SCREAMING_SNAKE_CASE : int = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE : Dict = int(max_length / len(snake_case_ ) )
SCREAMING_SNAKE_CASE : Optional[int] = np.stack(np.tile(snake_case_ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE : Union[str, Any] = int(max_length / len(snake_case_ ) )
SCREAMING_SNAKE_CASE : List[Any] = np.stack(np.tile(snake_case_ , snake_case_ ) )
SCREAMING_SNAKE_CASE : List[str] = np.pad(snake_case_ , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 )
if truncation == "fusion":
SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(snake_case_ , self.mel_filters )
SCREAMING_SNAKE_CASE : Optional[int] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
SCREAMING_SNAKE_CASE : str = self._np_extract_fbank_features(snake_case_ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Optional[Any] , a : Optional[Any] , a : List[Any] = None , a : List[Any] = None , a : List[Any] = None , a : Any = None , a : Tuple = None , **a : Union[str, Any] , ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE : Tuple = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
F" was sampled with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
SCREAMING_SNAKE_CASE : int = isinstance(snake_case_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
SCREAMING_SNAKE_CASE : List[str] = is_batched_numpy or (
isinstance(snake_case_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(snake_case_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(snake_case_ , np.ndarray ):
SCREAMING_SNAKE_CASE : List[str] = np.asarray(snake_case_ , dtype=np.floataa )
elif isinstance(snake_case_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : Optional[Any] = [np.asarray(snake_case_ )]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE : Union[str, Any] = [
self._get_input_mel(snake_case_ , max_length if max_length else self.nb_max_samples , snake_case_ , snake_case_ )
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Dict = []
for mel, longer in padded_inputs:
input_mel.append(snake_case_ )
is_longer.append(snake_case_ )
if truncation == "fusion" and sum(snake_case_ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE : List[str] = np.random.randint(0 , len(snake_case_ ) )
SCREAMING_SNAKE_CASE : str = True
if isinstance(input_mel[0] , snake_case_ ):
SCREAMING_SNAKE_CASE : Tuple = [np.asarray(snake_case_ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE : Tuple = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE : Tuple = {"input_features": input_mel, "is_longer": is_longer}
SCREAMING_SNAKE_CASE : List[Any] = BatchFeature(snake_case_ )
if return_tensors is not None:
SCREAMING_SNAKE_CASE : List[Any] = input_features.convert_to_tensors(snake_case_ )
return input_features | 25 |
from collections import defaultdict
from math import ceil, sqrt
def solution( t_limit: int = 1_000_000 , n_limit: int = 10 ) -> int:
    """simple docstring"""
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
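# Counting idea (illustrative): a hollow square lamina with outer side w and hole
# side h uses w * w - h * h tiles; w and h must share parity for the hole to be
# centred, hence the `% 2` adjustment and the step of 2 in the inner loop.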
if __name__ == "__main__":
print(f"{solution() = }")
| 27 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel( metaclass=DummyObject ):
    _backends = ['onnx']
def __init__( self : Union[str, Any] , *_a : Tuple , **_a : Tuple ) -> Any:
'''simple docstring'''
requires_backends(self , ['onnx'] )
@classmethod
def A ( cls : Dict , *_a : Optional[int] , **_a : Tuple ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['onnx'] )
@classmethod
def A ( cls : Optional[Any] , *_a : int , **_a : Any ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['onnx'] )
| 405 |
from math import pi, sqrt, tan
def surface_area_cube( side_length: float ) -> float:
    """simple docstring"""
    if side_length < 0:
        raise ValueError('surface_area_cube() only accepts non-negative values' )
    return 6 * side_length**2
def surface_area_cuboid( length: float , breadth: float , height: float ) -> float:
    """simple docstring"""
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError('surface_area_cuboid() only accepts non-negative values' )
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere( radius: float ) -> float:
    """simple docstring"""
    if radius < 0:
        raise ValueError('surface_area_sphere() only accepts non-negative values' )
    return 4 * pi * radius**2
def surface_area_hemisphere( radius: float ) -> float:
    """simple docstring"""
    if radius < 0:
        raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
    return 3 * pi * radius**2
def surface_area_cone( radius: float , height: float ) -> float:
    """simple docstring"""
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cone() only accepts non-negative values' )
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum( radius_1: float , radius_2: float , height: float ) -> float:
    """simple docstring"""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            'surface_area_conical_frustum() only accepts non-negative values' )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder( radius: float , height: float ) -> float:
    """simple docstring"""
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cylinder() only accepts non-negative values' )
    return 2 * pi * radius * (height + radius)
def surface_area_torus( torus_radius: float , tube_radius: float ) -> float:
    """simple docstring"""
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('surface_area_torus() only accepts non-negative values' )
    if torus_radius < tube_radius:
        raise ValueError(
            'surface_area_torus() does not support spindle or self intersecting tori' )
    return 4 * pow(pi , 2 ) * torus_radius * tube_radius
def area_rectangle( length: float , width: float ) -> float:
    """simple docstring"""
    if length < 0 or width < 0:
        raise ValueError('area_rectangle() only accepts non-negative values' )
    return length * width
def area_square( side_length: float ) -> float:
    """simple docstring"""
    if side_length < 0:
        raise ValueError('area_square() only accepts non-negative values' )
    return side_length**2
def area_triangle( base: float , height: float ) -> float:
    """simple docstring"""
    if base < 0 or height < 0:
        raise ValueError('area_triangle() only accepts non-negative values' )
    return (base * height) / 2
def area_triangle_three_sides( side_1: float , side_2: float , side_3: float ) -> float:
    """simple docstring"""
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError('Given three sides do not form a triangle' )
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3) )
    return area
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float:
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float:
"""simple docstring"""
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> float:
"""simple docstring"""
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float:
"""simple docstring"""
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float:
"""simple docstring"""
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float:
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 27 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
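# Note (our addition): registering the `_LazyModule` in `sys.modules` means a
# statement like `from transformers.models.blip import BlipProcessor` resolves
# the attribute lazily, so the heavy torch/TF submodules are imported only on
# first access.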
| 18 |
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """
    Implements the tanh activation function: tanh(x) = (2 / (1 + e^(-2x))) - 1.

    >>> tangent_hyperbolic(np.array([1, 5, 6, -0.67]))
    array([ 0.76159416,  0.9999092 ,  0.99998771, -0.58497988])
    """
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
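# A minimal companion sketch (our addition, not part of the original file):
# back-propagation also needs the derivative, d/dx tanh(x) = 1 - tanh(x) ** 2.
def tangent_hyperbolic_derivative(vector: np.ndarray) -> np.ndarray:
    t = tangent_hyperbolic(vector)
    return 1 - t**2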
| 27 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a ( __snake_case ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ : Tuple = KandinskyVaaControlnetPipeline
SCREAMING_SNAKE_CASE__ : Optional[int] = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
SCREAMING_SNAKE_CASE__ : str = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
SCREAMING_SNAKE_CASE__ : List[Any] = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
SCREAMING_SNAKE_CASE__ : Dict = False
@property
def snake_case_ ( self ):
"""simple docstring"""
return 32
@property
def snake_case_ ( self ):
"""simple docstring"""
return 32
@property
def snake_case_ ( self ):
"""simple docstring"""
return self.time_input_dim
@property
def snake_case_ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def snake_case_ ( self ):
"""simple docstring"""
return 100
@property
def snake_case_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE: List[Any] = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
__SCREAMING_SNAKE_CASE: Any = UNetaDConditionModel(**snake_case_ )
return model
@property
def snake_case_ ( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def snake_case_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE: List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Union[str, Any] = self.dummy_unet
__SCREAMING_SNAKE_CASE: Union[str, Any] = self.dummy_movq
__SCREAMING_SNAKE_CASE: List[Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='''linear''' , beta_start=0.00085 , beta_end=0.012 , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=snake_case_ , )
__SCREAMING_SNAKE_CASE: Any = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
__SCREAMING_SNAKE_CASE: str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
snake_case_ )
# create hint
__SCREAMING_SNAKE_CASE: Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
if str(snake_case_ ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE: Tuple = torch.manual_seed(snake_case_ )
else:
__SCREAMING_SNAKE_CASE: int = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
__SCREAMING_SNAKE_CASE: Optional[Any] = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: Dict = '''cpu'''
__SCREAMING_SNAKE_CASE: int = self.get_dummy_components()
__SCREAMING_SNAKE_CASE: Dict = self.pipeline_class(**snake_case_ )
__SCREAMING_SNAKE_CASE: List[str] = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
__SCREAMING_SNAKE_CASE: Tuple = pipe(**self.get_dummy_inputs(snake_case_ ) )
__SCREAMING_SNAKE_CASE: str = output.images
__SCREAMING_SNAKE_CASE: Optional[Any] = pipe(
**self.get_dummy_inputs(snake_case_ ) , return_dict=snake_case_ , )[0]
__SCREAMING_SNAKE_CASE: Optional[Any] = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE: Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE: Optional[int] = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def snake_case_ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy''' )
__SCREAMING_SNAKE_CASE: int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
__SCREAMING_SNAKE_CASE: Optional[Any] = torch.from_numpy(np.array(snake_case_ ) ).float() / 255.0
__SCREAMING_SNAKE_CASE: Any = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
__SCREAMING_SNAKE_CASE: int = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(snake_case_ )
__SCREAMING_SNAKE_CASE: int = KandinskyVaaControlnetPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
__SCREAMING_SNAKE_CASE: Optional[Any] = pipeline.to(snake_case_ )
pipeline.set_progress_bar_config(disable=snake_case_ )
__SCREAMING_SNAKE_CASE: Any = '''A robot, 4k photo'''
__SCREAMING_SNAKE_CASE: List[Any] = torch.Generator(device='''cuda''' ).manual_seed(0 )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: Dict = pipe_prior(
snake_case_ , generator=snake_case_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
__SCREAMING_SNAKE_CASE: Optional[Any] = torch.Generator(device='''cuda''' ).manual_seed(0 )
__SCREAMING_SNAKE_CASE: Dict = pipeline(
image_embeds=snake_case_ , negative_image_embeds=snake_case_ , hint=snake_case_ , generator=snake_case_ , num_inference_steps=100 , output_type='''np''' , )
__SCREAMING_SNAKE_CASE: Tuple = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(snake_case_ , snake_case_ )
| 202 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 27 | 0 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    """
    Object detection pipeline: predicts bounding boxes and classes for the
    objects found in an image.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items()))

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]))

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(scores.tolist(), labels, boxes)
                if vals[0] > threshold
            ]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(
                model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            raw_annotation["scores"] = raw_annotation["scores"].tolist()
            raw_annotation["labels"] = [
                self.model.config.id2label[label.item()] for label in raw_annotation["labels"]
            ]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in raw_annotation["boxes"]]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]
        return annotation

    def _get_bounding_box(self, box):
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
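# Typical usage sketch (our addition; the checkpoint name is illustrative):
#
#     from transformers import pipeline
#
#     detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#     detector("http://images.cocodataset.org/val2017/000000039769.jpg")
#     # -> [{"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]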
| 451 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__A : List[Any] = "http://www.mocksite.com/file1.txt"
__A : List[Any] = "\"text\": [\"foo\", \"foo\"]"
__A : Dict = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class lowerCamelCase:
'''simple docstring'''
__magic_name__ = 200
__magic_name__ = {'Content-Length': '100'}
__magic_name__ = {}
def lowerCAmelCase__ ( self , **snake_case_ ):
return [bytes(snake_case_ , 'utf-8' )]
def __lowerCAmelCase( *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir), use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir, use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files(data_dir):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 27 | 0 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__SCREAMING_SNAKE_CASE = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class __snake_case :
"""simple docstring"""
def __init__( self :Union[str, Any] , UpperCamelCase__ :str , UpperCamelCase__ :Optional[int]=16 , UpperCamelCase__ :List[str]=13 , UpperCamelCase__ :Dict=7 , UpperCamelCase__ :List[Any]=14 , UpperCamelCase__ :Union[str, Any]=10 , UpperCamelCase__ :List[str]=19 , UpperCamelCase__ :int=5 , UpperCamelCase__ :List[Any]=4 , UpperCamelCase__ :int=True , UpperCamelCase__ :int=16 , UpperCamelCase__ :Union[str, Any]=2 , UpperCamelCase__ :List[Any]=4 , UpperCamelCase__ :int=4 , UpperCamelCase__ :str="gelu" , UpperCamelCase__ :int=0.1 , UpperCamelCase__ :Union[str, Any]=0.1 , UpperCamelCase__ :List[Any]=[1, 2, 3, 4, 5] , UpperCamelCase__ :int=25 , UpperCamelCase__ :Any=5 , ):
_a = d_model
_a = parent
_a = batch_size
_a = prediction_length
_a = context_length
_a = cardinality
_a = num_time_features
_a = lags_sequence
_a = embedding_dimension
_a = is_training
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = context_length
_a = prediction_length + label_length
_a = label_length
_a = moving_average
_a = autocorrelation_factor
def SCREAMING_SNAKE_CASE_ ( self :Any ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] , UpperCamelCase__ :List[Any] ):
_a = config.context_length + max(config.lags_sequence )
_a = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
_a = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_a = floats_tensor([self.batch_size, _past_length] )
_a = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_a = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_a = floats_tensor([self.batch_size, config.prediction_length] )
_a = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self :Any ):
_a = self.get_config()
_a = self.prepare_autoformer_inputs_dict(snake_case_ )
return config, inputs_dict
def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
_a , _a = self.prepare_config_and_inputs()
return config, inputs_dict
def SCREAMING_SNAKE_CASE_ ( self :List[Any] , UpperCamelCase__ :Union[str, Any] , UpperCamelCase__ :str ):
_a = AutoformerModel(config=snake_case_ ).to(snake_case_ ).eval()
_a = model(**snake_case_ )
_a = outputs.encoder_last_hidden_state
_a = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_a = model.get_encoder()
encoder.save_pretrained(snake_case_ )
_a = AutoformerEncoder.from_pretrained(snake_case_ ).to(snake_case_ )
_a , _a , _a , _a , _a = model.create_network_inputs(**snake_case_ )
_a , _a = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
_a = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_a = encoder(inputs_embeds=snake_case_ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
_a = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
_a = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
_a = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_a = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_a = model.get_decoder()
decoder.save_pretrained(snake_case_ )
_a = AutoformerDecoder.from_pretrained(snake_case_ ).to(snake_case_ )
_a = decoder(
trend=snake_case_ , inputs_embeds=snake_case_ , encoder_hidden_states=snake_case_ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class __snake_case ( __snake_case , __snake_case , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ : Optional[Any] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
lowerCAmelCase_ : Optional[Any] = (AutoformerForPrediction,) if is_torch_available() else ()
lowerCAmelCase_ : str = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : Optional[Any] = False
lowerCAmelCase_ : Optional[Any] = False
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : Any = False
lowerCAmelCase_ : Tuple = False
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
_a = AutoformerModelTester(self )
_a = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self :int ):
_a , _a = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_a = model_class(snake_case_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ )
_a , _a = model_class.from_pretrained(snake_case_ , output_loading_info=snake_case_ )
self.assertEqual(info["missing_keys"] , [] )
def SCREAMING_SNAKE_CASE_ ( self :Tuple ):
_a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case_ )
@unittest.skip(reason="Model has no tokens embeddings" )
def SCREAMING_SNAKE_CASE_ ( self :Dict ):
pass
def SCREAMING_SNAKE_CASE_ ( self :List[str] ):
_a = inspect.signature(getattr(snake_case_ , "forward" ) )
# The main input is the name of the argument after `self`
_a = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , snake_case_ )
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(snake_case_ )
_a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(snake_case_ )] , snake_case_ )
def SCREAMING_SNAKE_CASE_ ( self :str ):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = True
_a = getattr(self.model_tester , "seq_length" , snake_case_ )
_a = getattr(self.model_tester , "decoder_seq_length" , snake_case_ )
_a = getattr(self.model_tester , "encoder_seq_length" , snake_case_ )
_a = getattr(self.model_tester , "d_model" , snake_case_ )
_a = getattr(self.model_tester , "num_attention_heads" , snake_case_ )
_a = d_model // num_attention_heads
for model_class in self.all_model_classes:
_a = True
_a = False
_a = True
_a = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
_a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_a = True
_a = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
_a = outputs.encoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
_a = len(snake_case_ )
_a = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(snake_case_ , snake_case_ )
# decoder attentions
_a = outputs.decoder_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
_a = outputs.cross_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
_a = True
_a = True
_a = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + 2 , len(snake_case_ ) )
_a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self :int ):
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    """Download a cached batch of time-series data used by the integration tests."""
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self :Tuple ):
_a = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ )
_a = prepare_batch()
with torch.no_grad():
_a = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
_a = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , snake_case_ )
_a = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ):
_a = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ )
_a = prepare_batch("val-batch.pt" )
with torch.no_grad():
_a = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
_a = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , snake_case_ )
_a = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def SCREAMING_SNAKE_CASE_ ( self :Any ):
_a = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ )
_a = prepare_batch("val-batch.pt" )
with torch.no_grad():
_a = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
_a = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , snake_case_ )
_a = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=snake_case_ )
_a = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case_ , rtol=1E-1 ) )
| 388 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True if number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions and reduce the result to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    """Sum the unique reduced solutions and return numerator + denominator."""
    unique_s: set = set()
    total: Fraction = Fraction(0)
    frac: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        frac = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(frac)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            frac = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(frac)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        frac = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(frac)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            frac = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(frac)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(f"{solution() = }")
| 27 | 0 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __lowercase (unittest.TestCase ):
def UpperCamelCase__ ( self ) ->Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
__lowerCAmelCase, __lowerCAmelCase : Union[str, Any] = FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-canny''' , from_pt=snake_case_ , dtype=jnp.bfloataa )
__lowerCAmelCase, __lowerCAmelCase : Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , controlnet=snake_case_ , from_pt=snake_case_ , dtype=jnp.bfloataa )
__lowerCAmelCase : Tuple = controlnet_params
__lowerCAmelCase : Union[str, Any] = '''bird'''
__lowerCAmelCase : Dict = jax.device_count()
__lowerCAmelCase : Tuple = pipe.prepare_text_inputs([prompts] * num_samples )
__lowerCAmelCase : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' )
__lowerCAmelCase : Union[str, Any] = pipe.prepare_image_inputs([canny_image] * num_samples )
__lowerCAmelCase : List[str] = jax.random.PRNGKey(0 )
__lowerCAmelCase : Tuple = jax.random.split(snake_case_ , jax.device_count() )
__lowerCAmelCase : Union[str, Any] = replicate(snake_case_ )
__lowerCAmelCase : Optional[Any] = shard(snake_case_ )
__lowerCAmelCase : Tuple = shard(snake_case_ )
__lowerCAmelCase : Tuple = pipe(
prompt_ids=snake_case_ , image=snake_case_ , params=snake_case_ , prng_seed=snake_case_ , num_inference_steps=50 , jit=snake_case_ , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
__lowerCAmelCase : Optional[int] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowerCAmelCase : Union[str, Any] = images[0, 253:256, 253:256, -1]
__lowerCAmelCase : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowerCAmelCase : Union[str, Any] = jnp.array(
[0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def UpperCamelCase__ ( self ) ->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase, __lowerCAmelCase : Any = FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-openpose''' , from_pt=snake_case_ , dtype=jnp.bfloataa )
__lowerCAmelCase, __lowerCAmelCase : List[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , controlnet=snake_case_ , from_pt=snake_case_ , dtype=jnp.bfloataa )
__lowerCAmelCase : Optional[Any] = controlnet_params
__lowerCAmelCase : List[str] = '''Chef in the kitchen'''
__lowerCAmelCase : Union[str, Any] = jax.device_count()
__lowerCAmelCase : Union[str, Any] = pipe.prepare_text_inputs([prompts] * num_samples )
__lowerCAmelCase : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' )
__lowerCAmelCase : Optional[Any] = pipe.prepare_image_inputs([pose_image] * num_samples )
__lowerCAmelCase : int = jax.random.PRNGKey(0 )
__lowerCAmelCase : Tuple = jax.random.split(snake_case_ , jax.device_count() )
__lowerCAmelCase : Optional[Any] = replicate(snake_case_ )
__lowerCAmelCase : Dict = shard(snake_case_ )
__lowerCAmelCase : Dict = shard(snake_case_ )
__lowerCAmelCase : Optional[Any] = pipe(
prompt_ids=snake_case_ , image=snake_case_ , params=snake_case_ , prng_seed=snake_case_ , num_inference_steps=50 , jit=snake_case_ , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
__lowerCAmelCase : str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowerCAmelCase : int = images[0, 253:256, 253:256, -1]
__lowerCAmelCase : int = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowerCAmelCase : str = jnp.array(
[[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 492 |
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """
    Sieve of Eratosthenes: return all primes up to and including num.

    >>> prime_sieve(10)
    [2, 3, 5, 7]
    """
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
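# Quick sanity check (our addition, not part of the original file):
if __name__ == "__main__":
    assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]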
| 27 | 0 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    # Set by concrete subclasses.
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 336 |
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """
    Evaluate a fully parenthesised infix expression with Dijkstra's
    two-stack algorithm.

    >>> dijkstras_two_stack_algorithm("(5 + 3)")
    8
    """
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num1, num2)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 27 | 0 |
import argparse
import datetime
def zeller(date_input: str) -> str:
    """
    Zeller's congruence: find the day of the week for a date given as a
    string in the mm-dd-yyyy or mm/dd/yyyy format.
    """
    # Days of the week for response
    days = {
        '0': 'Sunday',
        '1': 'Monday',
        '2': 'Tuesday',
        '3': 'Wednesday',
        '4': 'Thursday',
        '5': 'Friday',
        '6': 'Saturday',
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError('Must be 10 characters long')

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError('Month must be between 1 - 12')

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'')

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError('Date must be between 1 - 31')

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'')

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            'Year out of range. There has to be some sort of limit...right?')

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('The date was evaluated incorrectly. Contact developer.')

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ : Tuple = argparse.ArgumentParser(
description=(
"""Find out what day of the week nearly any date is or was. Enter """
"""date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
)
)
parser.add_argument(
"""date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
)
UpperCamelCase__ : Tuple = parser.parse_args()
zeller(args.date_input)
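# Example (our addition): 31 January 2010 fell on a Sunday, so
#     zeller("01-31-2010")  # -> "Your date 01-31-2010, is a Sunday!"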
| 387 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCamelCase( unittest.TestCase ):
'''simple docstring'''
@property
    def dummy_uncond_unet( self ):
        torch.manual_seed(0 )
        model = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model
def lowerCAmelCase__ ( self ):
_A = self.dummy_uncond_unet
_A = KarrasVeScheduler()
_A = KarrasVePipeline(unet=snake_case_ , scheduler=snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
_A = torch.manual_seed(0 )
_A = pipe(num_inference_steps=2 , generator=snake_case_ , output_type='numpy' ).images
_A = torch.manual_seed(0 )
_A = pipe(num_inference_steps=2 , generator=snake_case_ , output_type='numpy' , return_dict=snake_case_ )[0]
_A = image[0, -3:, -3:, -1]
_A = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_A = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class lowerCamelCase( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self ):
_A = 'google/ncsnpp-celebahq-256'
_A = UNetaDModel.from_pretrained(snake_case_ )
_A = KarrasVeScheduler()
_A = KarrasVePipeline(unet=snake_case_ , scheduler=snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
_A = torch.manual_seed(0 )
_A = pipe(num_inference_steps=20 , generator=snake_case_ , output_type='numpy' ).images
_A = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_A = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 27 | 0 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
_UpperCAmelCase = 0
# if input_string is "aba" than new_input_string become "a|b|a"
_UpperCAmelCase = ''''''
_UpperCAmelCase = ''''''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(_SCREAMING_SNAKE_CASE ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
_UpperCAmelCase , _UpperCAmelCase = 0, 0
# length[i] shows the length of palindromic substring with center i
_UpperCAmelCase = [1 for i in range(len(_SCREAMING_SNAKE_CASE ) )]
# for each character in new_string find corresponding palindromic string
_UpperCAmelCase = 0
for j in range(len(_SCREAMING_SNAKE_CASE ) ):
_UpperCAmelCase = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(_SCREAMING_SNAKE_CASE )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_UpperCAmelCase = 2 * k - 1
# does this string is ending after the previously explored end (that is r) ?
# if yes the update the new r to the last index of this
if j + k - 1 > r:
_UpperCAmelCase = j - k + 1 # noqa: E741
_UpperCAmelCase = j + k - 1
# update max_length and start position
if max_length < length[j]:
_UpperCAmelCase = length[j]
_UpperCAmelCase = j
# create that string
_UpperCAmelCase = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
| 602 |
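A quick sanity check of palindromic_string above; the expected outputs follow directly from the definition of a longest palindromic substring:

# Illustrative checks, assuming the function defined above is in scope.
assert palindromic_string("abbbaba") == "abbba"
assert palindromic_string("ababa") == "ababa"
assert palindromic_string("forgeeksskeegfor") == "geeksskeeg"
print("all palindrome checks passed")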
import itertools
import os
import random
import tempfile
import unittest

import numpy as np

from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch

if is_datasets_available():
    from datasets import load_dataset

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 27 | 0 |
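Outside the test harness, the extractor is driven the same way; a minimal sketch with random noise standing in for real 44.1 kHz audio (transformers with TVLT support is assumed installed):

import numpy as np
from transformers import TvltFeatureExtractor

feature_extractor = TvltFeatureExtractor()
waveform = np.random.randn(44100).astype(np.float32)  # one second of fake mono audio
inputs = feature_extractor(waveform, sampling_rate=44100, return_tensors="np")
# audio_values is 4-D: (batch, audio_channels, time_frames, feature_size),
# matching the shape assertions in test_call above
print(inputs.audio_values.shape)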
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] ) -> Tuple:
_snake_case = original_name.split('''.''' )[0]
_snake_case = key.split('''.''' )
_snake_case = int(key_list[key_list.index(_SCREAMING_SNAKE_CASE ) - 2] )
_snake_case = int(key_list[key_list.index(_SCREAMING_SNAKE_CASE ) - 1] )
_snake_case = orig_block_num - offset
_snake_case = key.replace(f'''{orig_block_num}.{layer_num}.{original_name}''' , f'''block.{new_block_num}.{layer_num}.{new_name}''' )
return key
def _UpperCAmelCase ( __lowerCamelCase : List[Any] ) -> Optional[Any]:
_snake_case = OrderedDict()
_snake_case , _snake_case = 0, 0
for key, value in state_dict.items():
if key.startswith('''network''' ):
_snake_case = key.replace('''network''' , '''poolformer.encoder''' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('''bias''' ) and "patch_embed" not in key:
patch_emb_offset += 1
_snake_case = key[: key.find('''proj''' )]
_snake_case = key.replace(_SCREAMING_SNAKE_CASE , f'''patch_embeddings.{total_embed_found}.''' )
_snake_case = key.replace('''proj''' , '''projection''' )
if key.endswith('''bias''' ):
total_embed_found += 1
if "patch_embeddings" in key:
_snake_case = '''poolformer.encoder.''' + key
if "mlp.fc1" in key:
_snake_case = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''mlp.fc1''' , '''output.conv1''' )
if "mlp.fc2" in key:
_snake_case = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''mlp.fc2''' , '''output.conv2''' )
if "norm1" in key:
_snake_case = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''norm1''' , '''before_norm''' )
if "norm2" in key:
_snake_case = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''norm2''' , '''after_norm''' )
if "layer_scale_1" in key:
_snake_case = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''layer_scale_1''' , '''layer_scale_1''' )
if "layer_scale_2" in key:
_snake_case = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '''layer_scale_2''' , '''layer_scale_2''' )
if "head" in key:
_snake_case = key.replace('''head''' , '''classifier''' )
_snake_case = value
return new_state_dict
def _UpperCAmelCase ( ) -> List[Any]:
_snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_snake_case = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return image
@torch.no_grad()
def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] ) -> str:
_snake_case = PoolFormerConfig()
# set attributes based on model_name
_snake_case = '''huggingface/label-files'''
_snake_case = model_name[-3:]
_snake_case = 10_00
_snake_case = '''imagenet-1k-id2label.json'''
_snake_case = (1, 10_00)
# set config attributes
_snake_case = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) )
_snake_case = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
_snake_case = idalabel
_snake_case = {v: k for k, v in idalabel.items()}
if size == "s12":
_snake_case = [2, 2, 6, 2]
_snake_case = [64, 1_28, 3_20, 5_12]
_snake_case = 4.0
_snake_case = 0.9
elif size == "s24":
_snake_case = [4, 4, 12, 4]
_snake_case = [64, 1_28, 3_20, 5_12]
_snake_case = 4.0
_snake_case = 0.9
elif size == "s36":
_snake_case = [6, 6, 18, 6]
_snake_case = [64, 1_28, 3_20, 5_12]
_snake_case = 4.0
_snake_case = 1E-6
_snake_case = 0.9
elif size == "m36":
_snake_case = [6, 6, 18, 6]
_snake_case = [96, 1_92, 3_84, 7_68]
_snake_case = 4.0
_snake_case = 1E-6
_snake_case = 0.95
elif size == "m48":
_snake_case = [8, 8, 24, 8]
_snake_case = [96, 1_92, 3_84, 7_68]
_snake_case = 4.0
_snake_case = 1E-6
_snake_case = 0.95
else:
raise ValueError(f'''Size {size} not supported''' )
# load image processor
_snake_case = PoolFormerImageProcessor(crop_pct=_SCREAMING_SNAKE_CASE )
# Prepare image
_snake_case = prepare_img()
_snake_case = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
logger.info(f'''Converting model {model_name}...''' )
# load original state dict
_snake_case = torch.load(_SCREAMING_SNAKE_CASE , map_location=torch.device('''cpu''' ) )
# rename keys
_snake_case = rename_keys(_SCREAMING_SNAKE_CASE )
# create HuggingFace model and load state dict
_snake_case = PoolFormerForImageClassification(_SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
model.eval()
# Define image processor
_snake_case = PoolFormerImageProcessor(crop_pct=_SCREAMING_SNAKE_CASE )
_snake_case = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values
# forward pass
_snake_case = model(_SCREAMING_SNAKE_CASE )
_snake_case = outputs.logits
# define expected logit slices for different models
if size == "s12":
_snake_case = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
elif size == "s24":
_snake_case = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
elif size == "s36":
_snake_case = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
elif size == "m36":
_snake_case = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
elif size == "m48":
_snake_case = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
else:
raise ValueError(f'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-2 )
# finally, save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
UpperCAmelCase__ = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 224 |
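A hypothetical programmatic invocation of the converter above; the checkpoint path is a placeholder for a file from the original PoolFormer release:

# Placeholder paths: substitute a real checkpoint downloaded from the
# original PoolFormer release before running.
convert_poolformer_checkpoint(
    model_name="poolformer_s12",
    checkpoint_path="/path/to/poolformer_s12.pth.tar",
    pytorch_dump_folder_path="poolformer_s12_hf",
)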
def check_bouncy(n: int) -> bool:
    """
    Returns True if the number is bouncy (its digits are neither entirely
    non-decreasing nor entirely non-increasing), False otherwise.
    """
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """
    Returns the least number at which the proportion of bouncy numbers
    first reaches the given percent.
    """
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")

    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
| 27 | 0 |
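A few concrete values illustrate the definitions; the thresholds below match the Project Euler 112 statement:

assert check_bouncy(538)         # 5 > 3 < 8: neither increasing nor decreasing
assert not check_bouncy(134468)  # digits never decrease
assert not check_bouncy(66420)   # digits never increase
print(solution(50))  # 538: the first point where half of all numbers so far are bouncy
print(solution(90))  # 21780: the 90% threshold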
def solution(n: int = 100) -> int:
    """
    Returns the difference between the square of the sum and the sum of the
    squares of the first n natural numbers, using the closed-form identities
    sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6.
    """
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
| 25 |
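The closed forms above can be cross-checked against a brute-force version; both should agree on the classic Project Euler 6 values:

# Brute-force cross-check of the closed-form solution above.
def solution_brute_force(n: int = 100) -> int:
    return sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))

assert solution(10) == solution_brute_force(10) == 2640
assert solution(100) == solution_brute_force(100) == 25164150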
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Computes the gross price from a net price and a tax rate."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
| 27 | 0 |
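For completeness, a small sketch of the inverse operation (recovering the net price from a tax-inclusive total); this helper is an addition, not part of the original file:

def price_before_tax(total: float, tax_rate: float) -> float:
    # Inverse of price_plus_tax: total = price * (1 + tax_rate)
    return total / (1 + tax_rate)

assert abs(price_before_tax(price_plus_tax(100, 0.25), 0.25) - 100) < 1e-9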