import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel

from ...models.attention import BasicTransformerBlock
from ...utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states

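
# A minimal usage sketch (illustrative only; assumes a diffusers checkout where this
# module lives and a CLIPVision-compatible `config` object):
#
#   encoder = PaintByExampleImageEncoder(config, proj_size=768)
#   pixel_values = torch.randn(1, 3, 224, 224)
#   latents = encoder(pixel_values)                               # shape: (1, 1, proj_size)
#   latents, uncond = encoder(pixel_values, return_uncond_vector=True)
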
def hamming(n_element: int) -> list:
    """Return the first `n_element` Hamming numbers, i.e. numbers of the form
    2^i * 3^j * 5^k, generated in ascending order with three merge pointers."""
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("n_element should be a positive number")
        raise my_error
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")

import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List

import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # only track leaf modules, plus conv/batchnorm layers, which hold the weights
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input.
        Under the hood we track all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")


def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)

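
# Typical invocation, assuming the script is saved as `convert_resnet_to_pytorch.py`
# (the filename is illustrative, not given above):
#
#   python convert_resnet_to_pytorch.py --model_name resnet50 --pytorch_dump_folder_path ./converted
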
import json
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from transformers import OneFormerImageProcessor
    from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
    from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput

if is_vision_available():
    from PIL import Image


def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the height and width the image processor is expected to return,
        given `do_resize=True` and a `shortest_edge`-style size."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width

    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )

        return inputs

    def test_init_without_params(self):
        pass

    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")

    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)

    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)

        self.assertEqual(segmentation[0].shape, target_sizes[0])

    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )

    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )

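
# Note on the RLE check in test_binary_mask_to_rle above: `binary_mask_to_rle` appears
# to encode runs over the row-major flattened mask as (1-indexed start, length) pairs.
# The ones at [0, 20:] (30 pixels) and [1, :15] (15 pixels) are contiguous once the
# 20x50 mask is flattened, giving a first run starting at flat position 21 with length
# 30 + 15 = 45, which is why the test expects rle[0] == 21 and rle[1] == 45.
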
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13

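
# A minimal usage sketch (illustrative; `GPTJConfig` / `GPTJOnnxConfig` as defined
# above, and a tokenizer instance is assumed to be available):
#
#   config = GPTJConfig(n_layer=4, n_head=4, n_embd=128)
#   onnx_config = GPTJOnnxConfig(config, use_past=True)
#   dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8)
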
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
from .sagemaker import get_sagemaker_input


description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()

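
# Typical invocation once `accelerate` is installed; the CLI wires this module up as
# the `accelerate config` subcommand, and running the module directly is equivalent:
#
#   accelerate config --config_file ./my_accelerate_config.yaml
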
import os
import zipfile

import pytest

from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path


@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg


def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right

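
# These tests follow the `datasets` repo's pytest layout; from a source checkout they
# would typically be run with something like (the test-file path is an assumption):
#
#   python -m pytest tests/test_extract.py
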
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


# Function to print lower half of diamond (pyramid)
def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


# Function to print the complete diamond pattern of "*"
def pretty_print(n):
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")

class TrieNode:
    def __init__(self):
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]):
        for word in words:
            self.insert(word)

    def insert(self, word: str):
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str):
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str):
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool):
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests():
    assert test_trie()


def main():
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins

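
# A minimal usage sketch (illustrative; `ASTModel` lives elsewhere in transformers and
# is assumed here):
#
#   from transformers import ASTConfig, ASTModel
#   config = ASTConfig(num_mel_bins=128, max_length=1024)
#   model = ASTModel(config)
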
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union

from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream


if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        """Write the dataset to SQL, batch by batch (optionally with a process pool)."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows

        return written

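
# A minimal usage sketch (illustrative; these helpers mirror what `datasets` exposes
# through `Dataset.from_sql` / `Dataset.to_sql`, and the sqlite URI is an example):
#
#   ds = SqlDatasetReader("SELECT * FROM my_table", "sqlite:///my.db").read()
#   SqlDatasetWriter(ds, "my_table_copy", "sqlite:///my.db", batch_size=1000).write()
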
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Simulated annealing: accept improving neighbors always, and worsening
    neighbors with probability e^(change / temperature), cooling over time."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : str =['torch']
def __init__( self : Optional[Any] , *UpperCamelCase : List[str] , **UpperCamelCase : Any ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *UpperCamelCase : Tuple , **UpperCamelCase : int ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *UpperCamelCase : int , **UpperCamelCase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Any =['torch']
def __init__( self : List[str] , *UpperCamelCase : str , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Any , *UpperCamelCase : int , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Dict =['torch']
def __init__( self : Any , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : str , *UpperCamelCase : List[Any] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : int , *UpperCamelCase : Dict , **UpperCamelCase : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Optional[Any] =['torch']
def __init__( self : Any , *UpperCamelCase : int , **UpperCamelCase : Dict ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Dict , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Dict , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : int =['torch']
def __init__( self : Any , *UpperCamelCase : Optional[int] , **UpperCamelCase : Any ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : int , *UpperCamelCase : int , **UpperCamelCase : Any ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : int , *UpperCamelCase : str , **UpperCamelCase : Any ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : str =['torch']
def __init__( self : Tuple , *UpperCamelCase : List[Any] , **UpperCamelCase : int ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : int , *UpperCamelCase : Dict , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : List[str] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Optional[Any] =['torch']
def __init__( self : Any , *UpperCamelCase : Dict , **UpperCamelCase : Dict ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Tuple , *UpperCamelCase : Tuple , **UpperCamelCase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *UpperCamelCase : int , **UpperCamelCase : str ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : int =['torch']
def __init__( self : int , *UpperCamelCase : Tuple , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *UpperCamelCase : str , **UpperCamelCase : int ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : List[str] , *UpperCamelCase : List[str] , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Optional[int] =['torch']
def __init__( self : Optional[Any] , *UpperCamelCase : Tuple , **UpperCamelCase : Any ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Tuple , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Any ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Tuple =['torch']
def __init__( self : int , *UpperCamelCase : str , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Tuple , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Any ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Dict =['torch']
def __init__( self : int , *UpperCamelCase : List[Any] , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : int , *UpperCamelCase : Optional[int] , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
# Module-level placeholder functions: calling any of them raises the
# missing-backend ImportError via `requires_backends`.
def lowerCamelCase_ ( *lowerCAmelCase: Dict , **UpperCamelCase: Tuple )-> Tuple:
    requires_backends(lowerCamelCase_ , ['torch'] )
def lowerCamelCase_ ( *lowerCAmelCase: int , **UpperCamelCase: Any )-> Tuple:
    requires_backends(lowerCamelCase_ , ['torch'] )
def lowerCamelCase_ ( *lowerCAmelCase: Optional[int] , **UpperCamelCase: Dict )-> Union[str, Any]:
    requires_backends(lowerCamelCase_ , ['torch'] )
def lowerCamelCase_ ( *lowerCAmelCase: Any , **UpperCamelCase: Any )-> Union[str, Any]:
    requires_backends(lowerCamelCase_ , ['torch'] )
def lowerCamelCase_ ( *lowerCAmelCase: Tuple , **UpperCamelCase: str )-> Any:
    requires_backends(lowerCamelCase_ , ['torch'] )
def lowerCamelCase_ ( *lowerCAmelCase: int , **UpperCamelCase: Dict )-> List[str]:
    requires_backends(lowerCamelCase_ , ['torch'] )
def lowerCamelCase_ ( *lowerCAmelCase: Union[str, Any] , **UpperCamelCase: Optional[Any] )-> Optional[int]:
    requires_backends(lowerCamelCase_ , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : int =['torch']
def __init__( self : Optional[int] , *UpperCamelCase : Optional[int] , **UpperCamelCase : int ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Any , *UpperCamelCase : Optional[int] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Tuple =['torch']
def __init__( self : Union[str, Any] , *UpperCamelCase : int , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : List[str] , *UpperCamelCase : Tuple , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : List[str] , *UpperCamelCase : int , **UpperCamelCase : str ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : str =['torch']
def __init__( self : Tuple , *UpperCamelCase : List[Any] , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *UpperCamelCase : List[Any] , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Any , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Any =['torch']
def __init__( self : Union[str, Any] , *UpperCamelCase : int , **UpperCamelCase : Any ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Tuple , *UpperCamelCase : int , **UpperCamelCase : int ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *UpperCamelCase : Dict , **UpperCamelCase : str ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Union[str, Any] =['torch']
def __init__( self : Optional[int] , *UpperCamelCase : Tuple , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : str , *UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Optional[int] =['torch']
def __init__( self : List[Any] , *UpperCamelCase : Tuple , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : List[str] , *UpperCamelCase : List[str] , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Any , *UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : int =['torch']
def __init__( self : Dict , *UpperCamelCase : int , **UpperCamelCase : Any ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Tuple , *UpperCamelCase : Optional[int] , **UpperCamelCase : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Any , *UpperCamelCase : str , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Optional[int] =['torch']
def __init__( self : List[Any] , *UpperCamelCase : int , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : List[str] , *UpperCamelCase : Tuple , **UpperCamelCase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *UpperCamelCase : List[Any] , **UpperCamelCase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : str =['torch']
def __init__( self : Tuple , *UpperCamelCase : Dict , **UpperCamelCase : Any ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *UpperCamelCase : Dict , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *UpperCamelCase : Any , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Tuple =['torch']
def __init__( self : List[Any] , *UpperCamelCase : Dict , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *UpperCamelCase : List[str] , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Dict =['torch']
def __init__( self : List[Any] , *UpperCamelCase : Any , **UpperCamelCase : int ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Tuple , *UpperCamelCase : int , **UpperCamelCase : str ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Tuple , *UpperCamelCase : str , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Dict =['torch']
def __init__( self : List[str] , *UpperCamelCase : List[str] , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Any , *UpperCamelCase : str , **UpperCamelCase : str ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Tuple , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : str =['torch']
def __init__( self : List[str] , *UpperCamelCase : Tuple , **UpperCamelCase : Any ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : str , *UpperCamelCase : Optional[int] , **UpperCamelCase : int ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Optional[Any] =['torch']
def __init__( self : Union[str, Any] , *UpperCamelCase : Any , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Dict , *UpperCamelCase : List[Any] , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : str , *UpperCamelCase : int , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : str =['torch']
def __init__( self : int , *UpperCamelCase : List[str] , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : List[str] , *UpperCamelCase : int , **UpperCamelCase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : int ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Tuple =['torch']
def __init__( self : Optional[Any] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Any ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *UpperCamelCase : List[str] , **UpperCamelCase : str ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Any , *UpperCamelCase : Optional[Any] , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Tuple =['torch']
def __init__( self : Optional[Any] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *UpperCamelCase : Any , **UpperCamelCase : int ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : int , *UpperCamelCase : int , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Any =['torch']
def __init__( self : str , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Dict , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Tuple =['torch']
def __init__( self : str , *UpperCamelCase : int , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : int , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Tuple , *UpperCamelCase : Dict , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Any =['torch']
def __init__( self : Optional[Any] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Tuple , *UpperCamelCase : List[Any] , **UpperCamelCase : int ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Any , *UpperCamelCase : str , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : int =['torch']
def __init__( self : int , *UpperCamelCase : Tuple , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : int , *UpperCamelCase : Dict , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Optional[int] =['torch']
def __init__( self : Union[str, Any] , *UpperCamelCase : str , **UpperCamelCase : List[Any] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Dict , *UpperCamelCase : List[str] , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *UpperCamelCase : Dict , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : str =['torch']
def __init__( self : Optional[int] , *UpperCamelCase : Tuple , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Any , *UpperCamelCase : int , **UpperCamelCase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Optional[Any] =['torch']
def __init__( self : Tuple , *UpperCamelCase : Optional[int] , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : List[str] , *UpperCamelCase : str , **UpperCamelCase : str ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : List[Any] =['torch']
def __init__( self : Union[str, Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Any , *UpperCamelCase : Optional[int] , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Tuple , *UpperCamelCase : int , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : List[Any] =['torch']
def __init__( self : int , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *UpperCamelCase : List[str] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : int =['torch']
def __init__( self : str , *UpperCamelCase : str , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : List[str] , *UpperCamelCase : List[Any] , **UpperCamelCase : Any ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : List[str] , *UpperCamelCase : List[str] , **UpperCamelCase : str ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Dict =['torch']
def __init__( self : List[str] , *UpperCamelCase : Any , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *UpperCamelCase : Tuple , **UpperCamelCase : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : List[str] , *UpperCamelCase : Any , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Dict =['torch']
def __init__( self : Optional[Any] , *UpperCamelCase : str , **UpperCamelCase : str ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Any , *UpperCamelCase : Dict , **UpperCamelCase : Any ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : List[Any] =['torch']
def __init__( self : Any , *UpperCamelCase : int , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Dict , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : str , *UpperCamelCase : int , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Union[str, Any] =['torch']
def __init__( self : List[str] , *UpperCamelCase : List[str] , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : int , *UpperCamelCase : Tuple , **UpperCamelCase : str ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Any , *UpperCamelCase : Any , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Union[str, Any] =['torch']
def __init__( self : List[Any] , *UpperCamelCase : Optional[int] , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *UpperCamelCase : List[str] , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : int , *UpperCamelCase : Dict , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : List[Any] =['torch']
def __init__( self : str , *UpperCamelCase : Dict , **UpperCamelCase : int ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Any , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Any ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Any ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Dict =['torch']
def __init__( self : List[Any] , *UpperCamelCase : Any , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *UpperCamelCase : Any , **UpperCamelCase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *UpperCamelCase : Dict , **UpperCamelCase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : str =['torch']
def __init__( self : List[str] , *UpperCamelCase : Any , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : int ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Union[str, Any] =['torch']
def __init__( self : Tuple , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Dict ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *UpperCamelCase : List[Any] , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Tuple =['torch']
def __init__( self : str , *UpperCamelCase : Dict , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *UpperCamelCase : Dict , **UpperCamelCase : str ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *UpperCamelCase : Any , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : Optional[Any] =['torch']
def __init__( self : Any , *UpperCamelCase : Tuple , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *UpperCamelCase : List[str] , **UpperCamelCase : Any ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Any , *UpperCamelCase : int , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
a_ : List[Any] =['torch']
def __init__( self : Any , *UpperCamelCase : int , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : str , *UpperCamelCase : List[str] , **UpperCamelCase : int ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *UpperCamelCase : int , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
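# Illustrative sketch of the pattern behind the placeholder classes above.
# The names here are hypothetical stand-ins (the real `DummyObject` metaclass
# and `requires_backends` helper live in the library's utils module): any
# attribute access or instantiation raises an ImportError naming the missing
# backend instead of a confusing AttributeError.
def _requires_backends_sketch(obj, backends):
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    raise ImportError(f"{name} requires the following backends: {', '.join(backends)}.")

class _DummyObjectSketch(type):
    # Metaclass so that classmethods like `from_pretrained` fail with a
    # helpful error even when the class is never instantiated.
    def __getattr__(cls, key):
        if key.startswith("_"):
            raise AttributeError(key)
        _requires_backends_sketch(cls, cls._backends)

class _ExamplePipeline(metaclass=_DummyObjectSketch):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        _requires_backends_sketch(self, ["torch"])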
| 716 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """
    Args:
        predicted_image_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`):
            The predicted CLIP image embedding conditioned on the CLIP text embedding input.
    """

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    """
    The prior transformer from unCLIP: a small transformer that predicts a denoised CLIP image embedding
    from a noisy image embedding, a timestep, and CLIP text features.
    """

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings: int = 77,
        additional_embeddings: int = 4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.")

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, activation_fn='gelu', attention_bias=True, )
                for d in range(num_layers)
            ] )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0)
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer('causal_attention_mask', causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        """Returns a dict of all attention processors used in the model, indexed by weight name."""
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, 'set_processor'):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        """Sets the attention processor(s) to use, either one for all layers or a dict keyed by weight name."""
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes.")

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, 'set_processor'):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """Disables custom attention processors and restores the default one."""
        self.set_attn_processor(AttnProcessor())

    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        """Denoise `hidden_states` (a noisy image embedding) conditioned on the timestep and text features."""
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set')

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds, dim=1, )

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings, (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ) , value=0.0 , )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        """Un-normalize the prior output using the stored CLIP statistics."""
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
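# Guarded smoke-test sketch for the restored module (shapes and values are
# chosen here purely for illustration, not taken from any checkpoint, and
# running it requires the package context for the relative imports above):
# a tiny prior denoises a random CLIP-style image embedding conditioned on
# pooled and per-token text features.
if __name__ == "__main__":
    prior = PriorTransformer(num_attention_heads=2, attention_head_dim=8, num_layers=2, embedding_dim=16)
    noisy_image_emb = torch.randn(1, 16)
    text_emb = torch.randn(1, 16)  # projected (pooled) text embedding
    text_enc_states = torch.randn(1, 77, 16)  # per-token text encoder states
    out = prior(noisy_image_emb, timestep=10, proj_embedding=text_emb, encoder_hidden_states=text_enc_states)
    print(out.predicted_image_embedding.shape)  # expected: torch.Size([1, 16])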
| 669 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on
class MBart50Tokenizer(PreTrainedTokenizer):
    """Construct a MBart50 tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else 'en_XX'
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target lang setting: prefix=[tgt_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
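# Quick, file-free check of the fairseq/spm id alignment implemented above
# (helper names here are illustrative): spm id 0 is <unk>, which fairseq
# places at id 3, and every real piece id is shifted up by
# `fairseq_offset` (= 1), mirroring `_convert_token_to_id`.
def _spm_to_fairseq_id(spm_id: int, fairseq_offset: int = 1, unk_token_id: int = 3) -> int:
    return spm_id + fairseq_offset if spm_id else unk_token_id

assert _spm_to_fairseq_id(0) == 3   # spm <unk> maps to the fairseq <unk> slot
assert _spm_to_fairseq_id(9) == 10  # an ordinary piece is shifted by the offset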
| 717 |
def lowerCamelCase_ ( number: int )-> int:
    if not isinstance(number , int ):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if number < 1:
        msg = f"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg )
    # Iteratively apply the recurrence C(n) = C(n-1) * (4n - 2) / (n + 1)
    current_number = 1
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
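# Cross-check sketch: the same 1-indexed Catalan number has the closed form
# C(2m, m) // (m + 1) with m = number - 1; agreement on small inputs guards
# against off-by-one errors in the recurrence above.
from math import comb

def catalan_via_binomial(number: int) -> int:
    m = number - 1
    return comb(2 * m, m) // (m + 1)

assert all(lowerCamelCase_(n) == catalan_via_binomial(n) for n in range(1, 20))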
| 669 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    """configuration_deberta""": ["""DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DebertaConfig""", """DebertaOnnxConfig"""],
    """tokenization_deberta""": ["""DebertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["""DebertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_deberta"""] = [
        """DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """DebertaForMaskedLM""",
        """DebertaForQuestionAnswering""",
        """DebertaForSequenceClassification""",
        """DebertaForTokenClassification""",
        """DebertaModel""",
        """DebertaPreTrainedModel""",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_deberta"""] = [
        """TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFDebertaForMaskedLM""",
        """TFDebertaForQuestionAnswering""",
        """TFDebertaForSequenceClassification""",
        """TFDebertaForTokenClassification""",
        """TFDebertaModel""",
        """TFDebertaPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
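# Toy sketch of what `_LazyModule` buys you (an illustrative stand-in only;
# the real implementation lives in `transformers.utils`): heavy submodules
# are imported on first attribute access, so importing this package stays
# cheap until e.g. `DebertaModel` is actually touched.
import importlib
import types

class _ToyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported class name to the submodule that defines it.
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr: str):
        if attr not in self._class_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)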
| 718 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" LXMERT tokenizer (backed by HuggingFace's tokenizers library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        # Keep the backend normalizer in sync with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
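# Quick, self-contained check of the segment-id layout produced by
# `create_token_type_ids_from_sequences` above, using toy ids instead of a
# real vocab file: [CLS] + A + [SEP] is segment 0, B + [SEP] is segment 1.
def _toy_token_type_ids(token_ids_0, token_ids_1=None, cls_id=101, sep_id=102):
    sep, cls = [sep_id], [cls_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep) * [0]
    return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

assert _toy_token_type_ids([7, 8]) == [0, 0, 0, 0]
assert _toy_token_type_ids([7, 8], [9]) == [0, 0, 0, 0, 1, 1]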
| 669 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase_ = {"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["""ViTFeatureExtractor"""]
lowerCAmelCase_ = ["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vit"""] = [
        """VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ViTForImageClassification""",
        """ViTForMaskedImageModeling""",
        """ViTModel""",
        """ViTPreTrainedModel""",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_vit"""] = [
        """TFViTForImageClassification""",
        """TFViTModel""",
        """TFViTPreTrainedModel""",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_vit"""] = [
        """FlaxViTForImageClassification""",
        """FlaxViTModel""",
        """FlaxViTPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
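# Hedged usage sketch (the helper import path is assumed to be
# `transformers.utils`, and the call needs model downloads): downstream code
# typically mirrors the availability guards above when it must work with
# whichever backend happens to be installed.
def load_vit_classifier(checkpoint: str = "google/vit-base-patch16-224"):
    from transformers.utils import is_tf_available, is_torch_available

    if is_torch_available():
        from transformers import ViTForImageClassification

        return ViTForImageClassification.from_pretrained(checkpoint)
    if is_tf_available():
        from transformers import TFViTForImageClassification

        return TFViTForImageClassification.from_pretrained(checkpoint)
    raise ImportError("Neither PyTorch nor TensorFlow is available.")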
| 719 |
from __future__ import annotations
from random import random
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase : int | None = None ):
'''simple docstring'''
_snake_case : str = value
_snake_case : List[Any] = random()
_snake_case : Node | None = None
_snake_case : Node | None = None
def __repr__( self : Optional[Any] ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return f"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = str(self.value ) + ' '
_snake_case : List[Any] = str(self.left or '' )
_snake_case : int = str(self.right or '' )
return value + left + right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> tuple[Node | None, Node | None]:
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
_snake_case , _snake_case : Optional[Any] = split(root.left , lowerCAmelCase )
return left, root
else:
_snake_case , _snake_case : List[str] = split(root.right , lowerCAmelCase )
return root, right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: Node | None )-> Node | None:
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
_snake_case : str = merge(left.right , lowerCAmelCase )
return left
else:
_snake_case : Union[str, Any] = merge(lowerCAmelCase , right.left )
return right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> Node | None:
_snake_case : Tuple = Node(lowerCAmelCase )
_snake_case , _snake_case : Optional[int] = split(lowerCAmelCase , lowerCAmelCase )
return merge(merge(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> Node | None:
_snake_case , _snake_case : Optional[int] = split(lowerCAmelCase , value - 1 )
_snake_case , _snake_case : List[str] = split(lowerCAmelCase , lowerCAmelCase )
return merge(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Node | None )-> None:
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=',' )
inorder(root.right )
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: str )-> Node | None:
for arg in args.split():
if arg[0] == "+":
_snake_case : List[str] = insert(lowerCAmelCase , int(arg[1:] ) )
elif arg[0] == "-":
_snake_case : Any = erase(lowerCAmelCase , int(arg[1:] ) )
else:
print('Unknown command' )
return root
def lowerCamelCase_ ( )-> None:
_snake_case : Tuple = None
print(
        'Enter "+ value" to insert a value into the treap, '
        '"- value" to erase all nodes with that value, and "q" to quit. ' )
_snake_case : List[Any] = input()
while args != "q":
_snake_case : int = interact_treap(lowerCAmelCase , lowerCAmelCase )
print(lowerCAmelCase )
_snake_case : Tuple = input()
    print('good bye!' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
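
# --- Illustrative sketch (added): a self-contained restatement of the
# split/merge primitives above with explicit names, so it can run on its own.
# All identifiers below are hypothetical and local to this demo.
import random
from dataclasses import dataclass, field


@dataclass
class _DemoNode:
    value: int
    prior: float = field(default_factory=random.random)
    left: "_DemoNode | None" = None
    right: "_DemoNode | None" = None


def _demo_merge(left, right):
    if not left or not right:
        return left or right
    if left.prior < right.prior:  # smaller priority becomes the root (min-heap)
        left.right = _demo_merge(left.right, right)
        return left
    right.left = _demo_merge(left, right.left)
    return right


def _demo_split(tree, value):  # -> (nodes <= value, nodes > value)
    if tree is None:
        return None, None
    if value < tree.value:
        lower, tree.left = _demo_split(tree.left, value)
        return lower, tree
    tree.right, upper = _demo_split(tree.right, value)
    return tree, upper


def _demo_inorder(tree):
    return [] if tree is None else _demo_inorder(tree.left) + [tree.value] + _demo_inorder(tree.right)


_root = None
for _v in [4, 2, 7, 1]:  # insert = split at the key, then merge around a new node
    _lo, _hi = _demo_split(_root, _v)
    _root = _demo_merge(_demo_merge(_lo, _DemoNode(_v)), _hi)
print(_demo_inorder(_root))  # [1, 2, 4, 7] regardless of the random priorities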
| 669 | 0 |
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
lowerCAmelCase_ = 'facebook/wmt19-en-de'
lowerCAmelCase_ = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
lowerCAmelCase_ = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
lowerCAmelCase_ = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
lowerCAmelCase_ = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
lowerCAmelCase_ = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
lowerCAmelCase_ = 'tiny-wmt19-en-de'
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 720 |
from functools import reduce
lowerCAmelCase_ = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def lowerCamelCase_ ( lowerCAmelCase: str = lowerCAmelCase_ )-> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , lowerCAmelCase[i : i + 13] ) )
        for i in range(len(lowerCAmelCase ) - 12 ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 669 | 0 |
from __future__ import annotations
import math
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: bool , lowerCAmelCase: list[int] , lowerCAmelCase: float )-> int:
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if len(_UpperCamelCase ) == 0:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , )
return min(
minimax(depth + 1 , node_index * 2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , )
def lowerCamelCase_ ( )-> str:
_snake_case : Union[str, Any] = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
_snake_case : Dict = math.log(len(_UpperCamelCase ) , 2 )
print('Optimal value : ' , end='' )
print(minimax(0 , 0 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
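
# --- Illustrative sketch (added): the same minimax recursion with explicit
# parameter names, evaluated on a tiny tree of four leaves. Names are
# hypothetical and local to this demo.
import math as _math


def _demo_minimax(depth, node_index, is_max, scores, height):
    if depth == height:
        return scores[node_index]
    children = (
        _demo_minimax(depth + 1, node_index * 2, not is_max, scores, height),
        _demo_minimax(depth + 1, node_index * 2 + 1, not is_max, scores, height),
    )
    return max(children) if is_max else min(children)


_scores = [3, 5, 2, 9]
_height = int(_math.log2(len(_scores)))  # two levels below the root
print(_demo_minimax(0, 0, True, _scores, _height))  # 3: max(min(3, 5), min(2, 9))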
| 721 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowerCamelCase_ ( )-> Any:
_snake_case : List[str] = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
}
_snake_case : Optional[Any] = Dataset.from_dict(lowerCAmelCase )
return dataset
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Union[str, Any] = get_dataset()
_snake_case : Tuple = make_duplicate_clusters(UpperCamelCase , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = get_dataset()
_snake_case , _snake_case : str = deduplicate_dataset(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , 2 )
print(UpperCamelCase )
self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , UpperCamelCase )
| 669 | 0 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _lowerCAmelCase ( __a ):
'''simple docstring'''
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
with open(A__ , encoding='utf-8' ) as input_file:
_snake_case : Dict = re.compile(R'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)' )
_snake_case : Any = input_file.read()
_snake_case : List[str] = regexp.search(A__ )
return match
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : int ):
'''simple docstring'''
with open(A__ , encoding='utf-8' ) as input_file:
_snake_case : Optional[Any] = re.compile(R'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()' , re.DOTALL )
_snake_case : int = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
_snake_case : Optional[int] = regexp.finditer(A__ )
_snake_case : int = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Optional[Any] = Path('./datasets' )
_snake_case : Any = list(dataset_paths.absolute().glob('**/*.py' ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(A__ ) ):
raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Tuple = Path('./datasets' )
_snake_case : Any = list(dataset_paths.absolute().glob('**/*.py' ) )
for dataset in dataset_files:
if self._no_print_statements(str(A__ ) ):
raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
| 700 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] =["""image_processor""", """tokenizer"""]
a_ : Optional[int] ="""CLIPImageProcessor"""
a_ : Optional[Any] =("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[Any]=None , **UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : int = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase , )
_snake_case : Optional[Any] = kwargs.pop('feature_extractor' )
_snake_case : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCamelCase , UpperCamelCase )
def __call__( self : Dict , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Dict ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_snake_case : Optional[int] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if images is not None:
_snake_case : Optional[int] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is not None and images is not None:
_snake_case : Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Any = self.tokenizer.model_input_names
_snake_case : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 669 | 0 |
'''simple docstring'''
def lowerCamelCase_ ( lowerCAmelCase: Dict )-> List[Any]:
stooge(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) - 1 )
return arr
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Optional[int] )-> int:
if i >= h:
return
# If first element is smaller than the last then swap them
if arr[i] > arr[h]:
_snake_case , _snake_case : Tuple = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
_snake_case : Optional[Any] = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (h - t) )
# Recursively sort last 2/3 elements
stooge(_SCREAMING_SNAKE_CASE , i + t , (_SCREAMING_SNAKE_CASE) )
# Recursively sort first 2/3 elements
stooge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (h - t) )
if __name__ == "__main__":
lowerCAmelCase_ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCAmelCase_ = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
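
# --- Illustrative sketch (added): the same recursion with explicit names,
# run on a fixed list so the expected output is known. Hypothetical names.
def _demo_stooge(arr, lo, hi):
    if arr[lo] > arr[hi]:
        arr[lo], arr[hi] = arr[hi], arr[lo]
    if hi - lo + 1 > 2:
        third = (hi - lo + 1) // 3
        _demo_stooge(arr, lo, hi - third)   # first 2/3
        _demo_stooge(arr, lo + third, hi)   # last 2/3
        _demo_stooge(arr, lo, hi - third)   # first 2/3 again

_data = [2, 4, 5, 3, 1]
_demo_stooge(_data, 0, len(_data) - 1)
print(_data)  # [1, 2, 3, 4, 5]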
| 701 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase_ = """http://www.mocksite.com/file1.txt"""
lowerCAmelCase_ = """\"text\": [\"foo\", \"foo\"]"""
lowerCAmelCase_ = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class _lowerCAmelCase :
'''simple docstring'''
a_ : int =200
a_ : List[str] ={"""Content-Length""": """100"""}
a_ : Tuple ={}
def UpperCamelCase_ ( self : Any , **UpperCamelCase : Any ):
'''simple docstring'''
return [bytes(UpperCamelCase , 'utf-8' )]
def lowerCamelCase_ ( *lowerCAmelCase: Tuple , **lowerCAmelCase: Tuple )-> str:
return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict )-> Optional[Any]:
import requests
monkeypatch.setattr(lowerCAmelCase , 'request' , lowerCAmelCase )
_snake_case : List[str] = URL
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[int] = url
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Any = [url]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': url}
_snake_case : int = 'dummy'
_snake_case : Optional[Any] = 'downloads'
_snake_case : Union[str, Any] = tmp_path
_snake_case : Dict = DownloadConfig(
cache_dir=os.path.join(lowerCAmelCase , lowerCAmelCase ) , use_etag=lowerCAmelCase , )
_snake_case : str = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Optional[int] = dl_manager.download(lowerCAmelCase )
_snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = [downloaded_paths]
_snake_case : List[str] = [urls]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in downloaded_paths.keys()
_snake_case : Any = downloaded_paths.values()
_snake_case : List[str] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowerCAmelCase , lowerCAmelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
_snake_case : str = Path(lowerCAmelCase )
_snake_case : int = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
_snake_case : List[str] = downloaded_path.read_text()
assert content == CONTENT
_snake_case : Any = downloaded_path.with_suffix('.json' )
assert metadata_downloaded_path.exists()
_snake_case : Tuple = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: Optional[int] , lowerCAmelCase: Any )-> str:
_snake_case : str = str(lowerCAmelCase )
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : str = filename
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[Any] = [filename]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': filename}
_snake_case : Any = 'dummy'
_snake_case : Union[str, Any] = xz_file.parent
_snake_case : int = 'extracted'
_snake_case : Union[str, Any] = DownloadConfig(
cache_dir=lowerCAmelCase , use_etag=lowerCAmelCase , )
_snake_case : List[str] = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Dict = dl_manager.extract(lowerCAmelCase )
_snake_case : Optional[int] = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[str] = [extracted_paths]
_snake_case : int = [paths]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in extracted_paths.keys()
_snake_case : Optional[int] = extracted_paths.values()
_snake_case : str = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowerCAmelCase , lowerCAmelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_snake_case : List[str] = Path(lowerCAmelCase )
_snake_case : Optional[Any] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowerCAmelCase , etag=lowerCAmelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_snake_case : Optional[int] = extracted_path.read_text()
_snake_case : int = text_file.read_text()
assert extracted_file_content == expected_file_content
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: List[Any] )-> Dict:
assert path.endswith('.jsonl' )
for num_items, line in enumerate(lowerCAmelCase , start=1 ):
_snake_case : Dict = json.loads(line.decode('utf-8' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: List[str] )-> Dict:
_snake_case : List[str] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: int )-> str:
_snake_case : List[Any] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[int] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_tar == 1
assert num_jsonl == 2
def lowerCamelCase_ ( lowerCAmelCase: Any )-> int:
_snake_case : Tuple = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowerCAmelCase ) , start=1 ):
assert os.path.basename(lowerCAmelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 669 | 0 |
from __future__ import annotations
from typing import TypedDict
class _lowerCAmelCase ( __snake_case ):
'''simple docstring'''
a_ : Optional[Any] =42
a_ : int =42
def lowerCamelCase_ ( lowerCAmelCase: List[str] )-> Tuple:
if not isinstance(lowercase__ , lowercase__ ):
raise TypeError('The parameter s type must be str.' )
return [s[i:] + s[:i] for i in range(len(lowercase__ ) )]
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] )-> Tuple:
if not isinstance(lowercase__ , lowercase__ ):
raise TypeError('The parameter s type must be str.' )
if not s:
raise ValueError('The parameter s must not be empty.' )
_snake_case : List[str] = all_rotations(lowercase__ )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
_snake_case : Optional[int] = {
'bwt_string': ''.join([word[-1] for word in rotations] ),
'idx_original_string': rotations.index(lowercase__ ),
}
return response
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: Tuple )-> Union[str, Any]:
if not isinstance(lowercase__ , lowercase__ ):
raise TypeError('The parameter bwt_string type must be str.' )
if not bwt_string:
raise ValueError('The parameter bwt_string must not be empty.' )
try:
_snake_case : Dict = int(lowercase__ )
except ValueError:
raise TypeError(
            'The parameter idx_original_string must be an int or'
            ' castable to int.' )
if idx_original_string < 0:
raise ValueError('The parameter idx_original_string must not be lower than 0.' )
if idx_original_string >= len(lowercase__ ):
raise ValueError(
'The parameter idx_original_string must be lower than' ' len(bwt_string).' )
_snake_case : Optional[int] = [''] * len(lowercase__ )
for _ in range(len(lowercase__ ) ):
for i in range(len(lowercase__ ) ):
_snake_case : str = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
lowerCAmelCase_ = """Provide a string that I will generate its BWT transform: """
lowerCAmelCase_ = input(entry_msg).strip()
lowerCAmelCase_ = bwt_transform(s)
print(
F"""Burrows Wheeler transform for string '{s}' results """
F"""in '{result["bwt_string"]}'"""
)
lowerCAmelCase_ = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
F"""Reversing Burrows Wheeler transform for entry '{result["bwt_string"]}' """
F"""we get original string '{original_string}'"""
)
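
# --- Illustrative sketch (added): the forward transform above, condensed to
# two lines on the classic "banana" example.
_s = "banana"
_rotations = sorted(_s[i:] + _s[:i] for i in range(len(_s)))
print("".join(word[-1] for word in _rotations), _rotations.index(_s))  # nnbaaa 3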
| 702 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : int ="""roberta"""
def __init__( self : int , UpperCamelCase : Tuple=5_02_65 , UpperCamelCase : Any=7_68 , UpperCamelCase : List[Any]=12 , UpperCamelCase : str=12 , UpperCamelCase : Dict=30_72 , UpperCamelCase : Any="gelu" , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : Optional[Any]=5_12 , UpperCamelCase : List[str]=2 , UpperCamelCase : Optional[Any]=0.02 , UpperCamelCase : Tuple=1e-1_2 , UpperCamelCase : str=1 , UpperCamelCase : int=0 , UpperCamelCase : Any=2 , UpperCamelCase : int="absolute" , UpperCamelCase : int=True , UpperCamelCase : List[Any]=None , **UpperCamelCase : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase )
_snake_case : Any = vocab_size
_snake_case : List[str] = hidden_size
_snake_case : List[str] = num_hidden_layers
_snake_case : Dict = num_attention_heads
_snake_case : List[str] = hidden_act
_snake_case : Union[str, Any] = intermediate_size
_snake_case : Union[str, Any] = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : Dict = max_position_embeddings
_snake_case : Optional[int] = type_vocab_size
_snake_case : Tuple = initializer_range
_snake_case : int = layer_norm_eps
_snake_case : Dict = position_embedding_type
_snake_case : Union[str, Any] = use_cache
_snake_case : str = classifier_dropout
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_snake_case : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 669 | 0 |
from __future__ import annotations
import math
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] )-> Any:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowerCamelCase_ ( lowerCAmelCase: int )-> List[Any]:
_snake_case : Dict = str(lowerCAmelCase_ )
_snake_case : Dict = [n]
for i in range(1 , len(lowerCAmelCase_ ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def lowerCamelCase_ ( lowerCAmelCase: List[Any] )-> Optional[int]:
if len(str(lowerCAmelCase_ ) ) > 3:
if not is_prime(int(str(lowerCAmelCase_ )[-3:] ) ) or not is_prime(int(str(lowerCAmelCase_ )[:3] ) ):
return False
return True
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] = 11 )-> Optional[int]:
_snake_case : list[int] = []
_snake_case : str = 13
while len(lowerCAmelCase_ ) != count:
if validate(lowerCAmelCase_ ):
_snake_case : Dict = list_truncated_nums(lowerCAmelCase_ )
if all(is_prime(lowerCAmelCase_ ) for i in list_nums ):
list_truncated_primes.append(lowerCAmelCase_ )
num += 2
return list_truncated_primes
def lowerCamelCase_ ( )-> str:
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(11)) = }""")
| 703 |
from random import randint, random
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: bool = False , lowerCAmelCase: bool = False , lowerCAmelCase: int = 5 , )-> list:
_snake_case : Dict = [[-1] * number_of_cells] # Create a highway without any car
_snake_case : List[str] = 0
_snake_case : List[str] = max(lowerCAmelCase , 0 )
while i < number_of_cells:
_snake_case : Optional[Any] = (
randint(0 , lowerCAmelCase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: int )-> int:
_snake_case : Dict = 0
_snake_case : Optional[Any] = highway_now[car_index + 1 :]
for cell in range(len(lowerCAmelCase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(lowerCAmelCase , -1 )
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: float , lowerCAmelCase: int )-> list:
_snake_case : List[Any] = len(lowerCAmelCase )
# Beforce calculations, the highway is empty
_snake_case : List[Any] = [-1] * number_of_cells
for car_index in range(lowerCAmelCase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
_snake_case : int = min(highway_now[car_index] + 1 , lowerCAmelCase )
# Number of empty cell before the next car
_snake_case : Tuple = get_distance(lowerCAmelCase , lowerCAmelCase ) - 1
# We can't have the car causing an accident
_snake_case : Union[str, Any] = min(next_highway[car_index] , lowerCAmelCase )
if random() < probability:
# Randomly, a driver will slow down
_snake_case : int = max(next_highway[car_index] - 1 , 0 )
return next_highway
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: int , lowerCAmelCase: float , lowerCAmelCase: int )-> list:
_snake_case : Dict = len(highway[0] )
for i in range(lowerCAmelCase ):
_snake_case : Any = update(highway[i] , lowerCAmelCase , lowerCAmelCase )
_snake_case : Tuple = [-1] * number_of_cells
for car_index in range(lowerCAmelCase ):
_snake_case : Union[str, Any] = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
_snake_case : Union[str, Any] = (car_index + speed) % number_of_cells
# Commit the change of position
_snake_case : Tuple = speed
highway.append(lowerCAmelCase )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
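
# --- Illustrative sketch (added): one hand-worked Nagel-Schreckenberg update
# (accelerate, brake to the gap, move), with the random slowdown disabled.
_highway = [0, -1, -1, 2, -1, -1, -1, 1]  # speeds; -1 marks an empty cell
_max_speed = 5
_n = len(_highway)
_next = [-1] * _n
for _i, _v in enumerate(_highway):
    if _v == -1:
        continue
    _v = min(_v + 1, _max_speed)  # rule 1: accelerate
    _gap = next(d for d in range(1, _n) if _highway[(_i + d) % _n] != -1) - 1
    _v = min(_v, _gap)            # rule 2: do not run into the car ahead
    _next[(_i + _v) % _n] = _v    # rule 4: move (rule 3, random braking, skipped)
print(_next)  # [-1, 1, -1, -1, -1, -1, 3, 0]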
| 669 | 0 |
from __future__ import annotations
import math
def lowerCamelCase_ ( lowerCAmelCase: int )-> str:
if num <= 0:
_snake_case : Tuple = F"""{num}: Invalid input, please enter a positive integer."""
raise ValueError(SCREAMING_SNAKE_CASE__ )
_snake_case : List[Any] = [True] * (num + 1)
_snake_case : List[Any] = []
_snake_case : Union[str, Any] = 2
_snake_case : List[str] = int(math.sqrt(SCREAMING_SNAKE_CASE__ ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(SCREAMING_SNAKE_CASE__ )
# Set multiples of start be False
for i in range(start * start , num + 1 , SCREAMING_SNAKE_CASE__ ):
if sieve[i] is True:
_snake_case : Union[str, Any] = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(SCREAMING_SNAKE_CASE__ )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
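
# --- Illustrative sketch (added): trial division as an independent oracle for
# a small bound; the sieve above, given 25, should return exactly this list.
print([p for p in range(2, 26) if all(p % q for q in range(2, p))])
# [2, 3, 5, 7, 11, 13, 17, 19, 23]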
| 704 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
lowerCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] =VOCAB_FILES_NAMES
a_ : List[str] =PRETRAINED_VOCAB_FILES_MAP
a_ : str =PRETRAINED_INIT_CONFIGURATION
a_ : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : List[Any] =RealmTokenizer
def __init__( self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Optional[Any]="[UNK]" , UpperCamelCase : Any="[SEP]" , UpperCamelCase : Optional[Any]="[PAD]" , UpperCamelCase : Optional[int]="[CLS]" , UpperCamelCase : Optional[Any]="[MASK]" , UpperCamelCase : Dict=True , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
_snake_case : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
_snake_case : int = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
_snake_case : List[str] = do_lower_case
_snake_case : List[Any] = strip_accents
_snake_case : Dict = tokenize_chinese_chars
_snake_case : Any = normalizer_class(**UpperCamelCase )
_snake_case : Optional[int] = do_lower_case
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = PaddingStrategy.MAX_LENGTH
_snake_case : Any = text
_snake_case : List[str] = kwargs.pop('text_pair' , UpperCamelCase )
_snake_case : int = kwargs.pop('return_tensors' , UpperCamelCase )
_snake_case : Optional[int] = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(UpperCamelCase ):
if batch_text_pair is not None:
_snake_case : List[Any] = batch_text_pair[idx]
else:
_snake_case : Optional[Any] = None
_snake_case : Optional[int] = super().__call__(UpperCamelCase , UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
_snake_case : str = encoded_candidates.get('input_ids' )
_snake_case : Tuple = encoded_candidates.get('attention_mask' )
_snake_case : List[str] = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(UpperCamelCase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(UpperCamelCase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(UpperCamelCase )
_snake_case : str = {key: item for key, item in output_data.items() if len(UpperCamelCase ) != 0}
return BatchEncoding(UpperCamelCase , tensor_type=UpperCamelCase )
def UpperCamelCase_ ( self : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any]=None ):
'''simple docstring'''
_snake_case : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : int = [self.sep_token_id]
_snake_case : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : Optional[Any] = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
| 669 | 0 |
def lowerCamelCase_ ( lowerCAmelCase: str )-> List[Any]:
_snake_case : Optional[int] = hex_num.strip()
if not hex_num:
raise ValueError('No value was passed to the function' )
_snake_case : str = hex_num[0] == '''-'''
if is_negative:
_snake_case : int = hex_num[1:]
try:
_snake_case : int = int(a_ , 16 )
except ValueError:
raise ValueError('Invalid value was passed to the function' )
_snake_case : List[Any] = ''''''
while int_num > 0:
_snake_case : Optional[Any] = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('-' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
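
# --- Illustrative sketch (added): cross-checking the conversion above with
# Python's built-ins; the result is the binary digits packed into an int,
# matching the return convention of the function above.
for _value in ("AC", "-1A", "0F"):
    _sign = -1 if _value.startswith("-") else 1
    _magnitude = int(_value.lstrip("-"), 16)
    print(_value, "->", _sign * int(bin(_magnitude)[2:]))
# AC -> 10101100, -1A -> -11010, 0F -> 1111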
| 705 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict , lowerCAmelCase: Union[str, Any] )-> Optional[int]:
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
_snake_case : Tuple = TOKENIZER_CLASSES
else:
_snake_case : Union[str, Any] = {tokenizer_name: getattr(lowerCAmelCase , tokenizer_name + 'Fast' )}
logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
_snake_case : Dict = TOKENIZER_CLASSES[tokenizer_name]
_snake_case : Optional[Any] = True
if checkpoint_name is None:
_snake_case : Union[str, Any] = list(tokenizer_class.max_model_input_sizes.keys() )
else:
_snake_case : Optional[int] = [checkpoint_name]
logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
_snake_case : str = tokenizer_class.from_pretrained(lowerCAmelCase , force_download=lowerCAmelCase )
# Save fast tokenizer
logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
_snake_case , _snake_case : Tuple = checkpoint.split('/' )
_snake_case : int = os.path.join(lowerCAmelCase , lowerCAmelCase )
elif add_prefix:
_snake_case : Dict = checkpoint
_snake_case : Optional[Any] = dump_path
else:
_snake_case : str = None
_snake_case : Union[str, Any] = dump_path
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
_snake_case : Optional[Any] = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
_snake_case : Optional[int] = file_path.split(lowerCAmelCase )[-1][0]
if next_char == "/":
_snake_case : Union[str, Any] = os.path.join(lowerCAmelCase , lowerCAmelCase )
_snake_case : str = None
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
_snake_case : Optional[int] = tokenizer.save_pretrained(
lowerCAmelCase , legacy_format=lowerCAmelCase , filename_prefix=lowerCAmelCase )
logger.info(F"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(lowerCAmelCase )
logger.info(F"""=> removing {file_name}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
lowerCAmelCase_ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 669 | 0 |
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
lowerCAmelCase_ = [
"""kernels/rwkv/wkv_cuda.cu""",
"""kernels/rwkv/wkv_op.cpp""",
"""kernels/deformable_detr/ms_deform_attn.h""",
"""kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""",
"""models/graphormer/algos_graphormer.pyx""",
]
def lowerCamelCase_ ( lowerCAmelCase: str )-> int:
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""")
lowerCAmelCase_ = parser.parse_args()
if args.check_lib:
lowerCAmelCase_ = importlib.import_module("""transformers""")
lowerCAmelCase_ = Path(transformers_module.__file__).parent
else:
lowerCAmelCase_ = Path.cwd() / """build/lib/transformers"""
if not test_custom_files_are_present(transformers_path):
raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
| 706 |
def lowerCamelCase_ ( lowerCAmelCase: bytes )-> str:
return "".join([hex(lowerCAmelCase )[2:].zfill(2 ).upper() for byte in list(lowerCAmelCase )] )
def lowerCamelCase_ ( lowerCAmelCase: str )-> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(lowerCAmelCase ) % 2) != 0:
raise ValueError(
'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(lowerCAmelCase ) <= set('0123456789ABCDEF' ):
raise ValueError(
'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(lowerCAmelCase ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
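
# --- Illustrative sketch (added): the standard library's base64 module as an
# independent oracle for the two helpers above.
import base64

_payload = b"hello"
_encoded = base64.b16encode(_payload).decode("ascii")
print(_encoded)                    # 68656C6C6F
print(base64.b16decode(_encoded))  # b'hello'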
| 669 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase_ = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 707 |
import csv
import tweepy
# Twitter API credentials
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
def lowerCamelCase_ ( lowerCAmelCase: str )-> None:
# authorize twitter, initialize tweepy
_snake_case : Optional[Any] = tweepy.OAuthHandler(lowerCAmelCase , lowerCAmelCase )
auth.set_access_token(lowerCAmelCase , lowerCAmelCase )
_snake_case : List[Any] = tweepy.API(lowerCAmelCase )
# initialize a list to hold all the tweepy Tweets
_snake_case : Any = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_snake_case : List[str] = api.user_timeline(screen_name=lowerCAmelCase , count=2_00 )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# save the id of the oldest tweet less one
_snake_case : List[Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowerCAmelCase ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
_snake_case : Tuple = api.user_timeline(
screen_name=lowerCAmelCase , count=2_00 , max_id=lowerCAmelCase )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# update the id of the oldest tweet less one
_snake_case : List[str] = alltweets[-1].id - 1
print(F"""...{len(lowerCAmelCase )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
_snake_case : int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""" , 'w' ) as f:
_snake_case : Any = csv.writer(lowerCAmelCase )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(lowerCAmelCase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 669 | 0 |
import math
import random
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: Tuple = False )-> float:
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
lowerCAmelCase_ = 0.02
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: Optional[int] )-> float:
_snake_case : int = float(2 * (random.randint(1 , 1_00 )) - 1 )
for _ in range(__UpperCamelCase ):
# Forward propagation
_snake_case : List[str] = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
_snake_case : Tuple = (expected / 1_00) - layer_a
# Error delta
_snake_case : str = layer_1_error * sigmoid_function(__UpperCamelCase , __UpperCamelCase )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 1_00
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase_ = int(input("""Expected value: """))
lowerCAmelCase_ = int(input("""Number of propagations: """))
print(forward_propagation(expected, number_propagations))
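
# --- Illustrative sketch (added): numerical confirmation of the derivative
# identity the training loop relies on: sigmoid'(x) = s * (1 - s).
_x = 0.7
_s = 1 / (1 + math.exp(-_x))
_numeric = (1 / (1 + math.exp(-(_x + 1e-6))) - _s) / 1e-6
print(round(_numeric, 4), round(_s * (1 - _s), 4))  # both 0.2217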
| 708 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : Optional[Union[str, Path]] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : bool =True
a_ : Optional[int] =None
a_ : int =1
a_ : Optional[Union[str, bool]] =None
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(UpperCamelCase ) for k, v in self.__dict__.items()} )
| 669 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class _lowerCAmelCase ( _lowerCAmelCase ):
'''simple docstring'''
a_ : str ='timm_backbone'
def __init__( self : str , UpperCamelCase : int=None , UpperCamelCase : Tuple=3 , UpperCamelCase : Tuple=True , UpperCamelCase : Any=True , UpperCamelCase : Union[str, Any]=None , **UpperCamelCase : List[str] , ):
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
_snake_case : Any = backbone
_snake_case : List[str] = num_channels
_snake_case : Tuple = features_only
_snake_case : Dict = use_pretrained_backbone
_snake_case : Optional[Any] = True
_snake_case : List[Any] = out_indices if out_indices is not None else (-1,)
| 709 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
lowerCAmelCase_ = ["""gpt2"""]
lowerCAmelCase_ = """gpt2"""
if is_tf_available():
class _lowerCAmelCase ( tf.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase : Dict ):
'''simple docstring'''
super().__init__()
_snake_case : Optional[int] = tokenizer
_snake_case : Union[str, Any] = AutoConfig.from_pretrained(UpperCamelCase )
_snake_case : int = TFGPTaLMHeadModel.from_config(UpperCamelCase )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_snake_case : Dict = self.tokenizer(UpperCamelCase )
_snake_case : Union[str, Any] = tokenized['input_ids'].to_tensor()
_snake_case : Any = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
_snake_case : Tuple = self.model(input_ids=UpperCamelCase , attention_mask=UpperCamelCase )['logits']
return outputs
@require_tf
@require_keras_nlp
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
super().setUp()
_snake_case : Optional[int] = [GPTaTokenizer.from_pretrained(UpperCamelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
_snake_case : Tuple = [TFGPTaTokenizer.from_pretrained(UpperCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
_snake_case : Any = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
_snake_case : Tuple = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
_snake_case : Optional[int] = tokenizer([test_inputs] , return_tensors='tf' )
_snake_case : Tuple = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
_snake_case : Dict = python_outputs[key].numpy()
_snake_case : Optional[Any] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(UpperCamelCase , tf.intaa ) == tf_outputs_values ) )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case : str = tf.function(UpperCamelCase )
for test_inputs in self.test_sentences:
_snake_case : int = tf.constant(UpperCamelCase )
_snake_case : Tuple = compiled_tokenizer(UpperCamelCase )
_snake_case : int = tf_tokenizer(UpperCamelCase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case : Union[str, Any] = ModelToSave(tokenizer=UpperCamelCase )
_snake_case : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case : Tuple = model.serving(UpperCamelCase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_snake_case : str = Path(UpperCamelCase ) / 'saved.model'
tf.saved_model.save(UpperCamelCase , UpperCamelCase , signatures={'serving_default': model.serving} )
_snake_case : Optional[int] = tf.saved_model.load(UpperCamelCase )
_snake_case : List[str] = loaded_model.signatures['serving_default'](UpperCamelCase )['output_0']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case : Any = tf_tokenizer(UpperCamelCase ) # Build model with some sample inputs
_snake_case : Optional[Any] = tf_tokenizer.get_config()
_snake_case : Tuple = TFGPTaTokenizer.from_config(UpperCamelCase )
_snake_case : Optional[Any] = model_from_config(UpperCamelCase )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
_snake_case : Union[str, Any] = 12_31_23
for max_length in [3, 5, 10_24]:
_snake_case : Union[str, Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case : List[str] = tf_tokenizer(UpperCamelCase , max_length=UpperCamelCase )
_snake_case : int = out['input_ids'].numpy().shape[1]
assert out_length == max_length
| 669 | 0 |
from math import sqrt
def lowerCamelCase_ ( lowerCAmelCase: int )-> List[Any]:
_snake_case : Dict = 0
for i in range(1 , int(sqrt(snake_case_ ) + 1 ) ):
if n % i == 0 and i != sqrt(snake_case_ ):
total += i + n // i
elif i == sqrt(snake_case_ ):
total += i
return total - n
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] = 1_00_00 )-> int:
_snake_case : Optional[Any] = sum(
i
for i in range(1 , snake_case_ )
if sum_of_divisors(sum_of_divisors(snake_case_ ) ) == i and sum_of_divisors(snake_case_ ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
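
# --- Illustrative sketch (added): (220, 284) is the classic amicable pair the
# filter above keeps; checked here with a naive proper-divisor sum.
def _demo_d(n):
    return sum(i for i in range(1, n) if n % i == 0)

print(_demo_d(220), _demo_d(284))  # 284 220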
| 710 |
def lowerCamelCase_ ( lowerCAmelCase: int )-> list:
_snake_case : List[Any] = int(lowerCAmelCase )
if n_element < 1:
_snake_case : int = ValueError('a should be a positive number' )
raise my_error
_snake_case : Union[str, Any] = [1]
_snake_case , _snake_case , _snake_case : Any = (0, 0, 0)
_snake_case : str = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
lowerCAmelCase_ = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
lowerCAmelCase_ = hamming(int(n))
print("""-----------------------------------------------------""")
print(F"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 669 | 0 |
def lowerCamelCase_ ( lowerCAmelCase: int )-> bool:
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
_snake_case : Dict = F"""Input value of [number={number}] must be an integer"""
raise TypeError(__UpperCamelCase )
if number < 0:
return False
_snake_case : List[str] = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
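
# --- Illustrative sketch (added): the digit-by-digit comparison above accepts
# exactly the automorphic numbers (n whose square ends in n); a quick census:
print([n for n in range(100) if str(n * n).endswith(str(n))])  # [0, 1, 5, 6, 25, 76]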
| 711 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Tuple="shi-labs/oneformer_demo" )-> Any:
with open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type='dataset' ) , 'r' ) as f:
_snake_case : str = json.load(lowerCAmelCase )
_snake_case : List[str] = {}
_snake_case : Optional[Any] = []
_snake_case : Optional[Any] = []
for key, info in class_info.items():
_snake_case : Optional[int] = info['name']
class_names.append(info['name'] )
if info["isthing"]:
thing_ids.append(int(lowerCAmelCase ) )
_snake_case : List[str] = thing_ids
_snake_case : Optional[Any] = class_names
return metadata
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any=7 , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : Dict=30 , UpperCamelCase : int=4_00 , UpperCamelCase : List[str]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : str=True , UpperCamelCase : Any=[0.5, 0.5, 0.5] , UpperCamelCase : int=[0.5, 0.5, 0.5] , UpperCamelCase : Dict=10 , UpperCamelCase : Dict=False , UpperCamelCase : Dict=2_55 , UpperCamelCase : Dict="shi-labs/oneformer_demo" , UpperCamelCase : Optional[int]="ade20k_panoptic.json" , UpperCamelCase : Tuple=10 , ):
'''simple docstring'''
_snake_case : Optional[Any] = parent
_snake_case : Union[str, Any] = batch_size
_snake_case : Tuple = num_channels
_snake_case : List[str] = min_resolution
_snake_case : List[str] = max_resolution
_snake_case : Optional[Any] = do_resize
_snake_case : Optional[Any] = {'shortest_edge': 32, 'longest_edge': 13_33} if size is None else size
_snake_case : Optional[int] = do_normalize
_snake_case : Any = image_mean
_snake_case : List[Any] = image_std
_snake_case : Any = class_info_file
_snake_case : List[str] = prepare_metadata(UpperCamelCase , UpperCamelCase )
_snake_case : Any = num_text
_snake_case : str = repo_path
# for the post_process_functions
_snake_case : Optional[Any] = 2
_snake_case : str = 10
_snake_case : Union[str, Any] = 10
_snake_case : List[Any] = 3
_snake_case : str = 4
_snake_case : List[Any] = num_labels
_snake_case : str = do_reduce_labels
_snake_case : List[str] = ignore_index
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any]=False ):
'''simple docstring'''
if not batched:
_snake_case : Any = image_inputs[0]
if isinstance(UpperCamelCase , Image.Image ):
_snake_case , _snake_case : Any = image.size
else:
_snake_case , _snake_case : Any = image.shape[1], image.shape[2]
if w < h:
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * h / w )
_snake_case : Any = self.size['shortest_edge']
elif w > h:
_snake_case : int = self.size['shortest_edge']
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * w / h )
else:
_snake_case : Dict = self.size['shortest_edge']
_snake_case : Dict = self.size['shortest_edge']
else:
_snake_case : List[Any] = []
for image in image_inputs:
_snake_case , _snake_case : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_snake_case : List[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0]
_snake_case : Optional[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1]
return expected_height, expected_width
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Tuple =OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
a_ : Any =image_processing_class
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = OneFormerImageProcessorTester(self )
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(UpperCamelCase , 'image_std' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'size' ) )
self.assertTrue(hasattr(UpperCamelCase , 'ignore_index' ) )
self.assertTrue(hasattr(UpperCamelCase , 'class_info_file' ) )
self.assertTrue(hasattr(UpperCamelCase , 'num_text' ) )
self.assertTrue(hasattr(UpperCamelCase , 'repo_path' ) )
self.assertTrue(hasattr(UpperCamelCase , 'metadata' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_reduce_labels' ) )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
_snake_case : Optional[Any] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : List[Any] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : int = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
_snake_case : int = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Union[str, Any] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : Optional[int] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
_snake_case : Optional[int] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : List[str] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Tuple=False , UpperCamelCase : str=False , UpperCamelCase : Dict="np" ):
'''simple docstring'''
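        # Builds pixel inputs plus, optionally, random segmentation maps and an instance-id-to-semantic-id mapping.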
_snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_snake_case : List[str] = self.image_processing_tester.num_labels
_snake_case : Optional[int] = None
_snake_case : str = None
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
if with_segmentation_maps:
_snake_case : Optional[int] = num_labels
if is_instance_map:
_snake_case : Union[str, Any] = list(range(UpperCamelCase ) ) * 2
_snake_case : Tuple = dict(enumerate(UpperCamelCase ) )
_snake_case : Union[str, Any] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
_snake_case : int = [Image.fromarray(UpperCamelCase ) for annotation in annotations]
_snake_case : List[Any] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , UpperCamelCase , return_tensors='pt' , instance_id_to_semantic_id=UpperCamelCase , pad_and_return_pixel_mask=UpperCamelCase , )
return inputs
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
def common(UpperCamelCase : Any=False , UpperCamelCase : int=None ):
_snake_case : Any = self.comm_get_image_processor_inputs(
with_segmentation_maps=UpperCamelCase , is_instance_map=UpperCamelCase , segmentation_type=UpperCamelCase )
_snake_case : Union[str, Any] = inputs['mask_labels']
_snake_case : Optional[int] = inputs['class_labels']
_snake_case : Optional[int] = inputs['pixel_values']
_snake_case : Optional[Any] = inputs['text_inputs']
# check the batch_size
for mask_label, class_label, text_input in zip(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(UpperCamelCase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=UpperCamelCase )
common(is_instance_map=UpperCamelCase , segmentation_type='pil' )
common(is_instance_map=UpperCamelCase , segmentation_type='pil' )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = np.zeros((20, 50) )
_snake_case : int = 1
_snake_case : int = 1
_snake_case : Optional[Any] = 1
_snake_case : List[Any] = binary_mask_to_rle(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
        _snake_case : Any = image_processor.post_process_semantic_segmentation(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
_snake_case : Optional[Any] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        _snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(UpperCamelCase , target_sizes=UpperCamelCase )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Any = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : int = image_processor.post_process_instance_segmentation(UpperCamelCase , threshold=0 )
self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : str = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : Any = image_processor.post_process_panoptic_segmentation(UpperCamelCase , threshold=0 )
self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 669 | 0 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: int , lowerCAmelCase: List[str] , lowerCAmelCase: str , lowerCAmelCase: Tuple )-> Any:
# Load configuration defined in the metadata file
with open(lowerCamelCase_ ) as metadata_file:
_snake_case : str = json.load(lowerCamelCase_ )
_snake_case : Dict = LukeConfig(use_entity_aware_attention=lowerCamelCase_ , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
_snake_case : Optional[int] = torch.load(lowerCamelCase_ , map_location='cpu' )
# Load the entity vocab file
_snake_case : int = load_entity_vocab(lowerCamelCase_ )
_snake_case : Union[str, Any] = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
_snake_case : str = AddedToken('<ent>' , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ )
_snake_case : str = AddedToken('<ent2>' , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , LukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Optional[Any] = LukeTokenizer.from_pretrained(lowerCamelCase_ )
# Initialize the embeddings of the special tokens
_snake_case : Tuple = state_dict['embeddings.word_embeddings.weight']
_snake_case : Any = word_emb[tokenizer.convert_tokens_to_ids(['@'] )[0]].unsqueeze(0 )
_snake_case : Dict = word_emb[tokenizer.convert_tokens_to_ids(['#'] )[0]].unsqueeze(0 )
_snake_case : str = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_snake_case : List[Any] = F"""encoder.layer.{layer_index}.attention.self."""
_snake_case : Any = state_dict[prefix + matrix_name]
_snake_case : List[Any] = state_dict[prefix + matrix_name]
_snake_case : List[str] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_snake_case : List[str] = state_dict['entity_embeddings.entity_embeddings.weight']
_snake_case : List[str] = entity_emb[entity_vocab['[MASK]']]
_snake_case : Dict = LukeModel(config=lowerCamelCase_ ).eval()
_snake_case , _snake_case : Any = model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ )
if not (len(lowerCamelCase_ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F"""Missing keys {', '.join(lowerCamelCase_ )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith('entity_predictions' ) or key.startswith('lm_head' ) for key in unexpected_keys )):
raise ValueError(
'Unexpected keys'
F""" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}""" )
# Check outputs
_snake_case : str = LukeTokenizer.from_pretrained(lowerCamelCase_ , task='entity_classification' )
_snake_case : Union[str, Any] = (
'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'
' new world number one avoid a humiliating second- round exit at Wimbledon .'
)
_snake_case : Optional[Any] = (39, 42)
_snake_case : List[Any] = tokenizer(lowerCamelCase_ , entity_spans=[span] , add_prefix_space=lowerCamelCase_ , return_tensors='pt' )
_snake_case : str = model(**lowerCamelCase_ )
# Verify word hidden states
if model_size == "large":
_snake_case : int = torch.Size((1, 42, 10_24) )
_snake_case : Union[str, Any] = torch.tensor(
[[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] )
else: # base
_snake_case : int = torch.Size((1, 42, 7_68) )
_snake_case : Optional[int] = torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_snake_case : Any = torch.Size((1, 1, 10_24) )
_snake_case : int = torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] )
else: # base
_snake_case : Dict = torch.Size((1, 1, 7_68) )
_snake_case : Any = torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(lowerCamelCase_ ) )
model.save_pretrained(lowerCamelCase_ )
def lowerCamelCase_ ( lowerCAmelCase: Any )-> Union[str, Any]:
_snake_case : List[Any] = {}
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(lowerCamelCase_ ):
_snake_case , _snake_case : Tuple = line.rstrip().split('\t' )
_snake_case : Any = index
return entity_vocab
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
lowerCAmelCase_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 712 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCAmelCase_ = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def lowerCamelCase_ ( )-> Tuple:
_snake_case : int = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_snake_case : int = get_sagemaker_input()
else:
_snake_case : Any = get_cluster_input()
return config
def lowerCamelCase_ ( lowerCAmelCase: str=None )-> Any:
if subparsers is not None:
_snake_case : List[Any] = subparsers.add_parser('config' , description=lowerCAmelCase )
else:
_snake_case : Dict = argparse.ArgumentParser('Accelerate config command' , description=lowerCAmelCase )
parser.add_argument(
'--config_file' , default=lowerCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase )
return parser
def lowerCamelCase_ ( lowerCAmelCase: Any )-> Any:
_snake_case : Dict = get_user_input()
if args.config_file is not None:
_snake_case : List[str] = args.config_file
else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
_snake_case : Union[str, Any] = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(lowerCAmelCase )
else:
config.to_yaml_file(lowerCAmelCase )
print(F"""accelerate configuration saved at {config_file}""" )
def lowerCamelCase_ ( )-> Dict:
_snake_case : List[str] = config_command_parser()
_snake_case : str = parser.parse_args()
config_command(lowerCAmelCase )
if __name__ == "__main__":
main()
| 669 | 0 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] )-> Dict:
_snake_case : str = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(__snake_case , __snake_case )
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] )-> str:
_snake_case : Optional[int] = emb.weight.shape
_snake_case : Any = nn.Linear(__snake_case , __snake_case , bias=__snake_case )
_snake_case : Dict = emb.weight.data
return lin_layer
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] , lowerCAmelCase: Any=None )-> List[Any]:
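    # Translate fairseq MoE parameter names (moe_layer.experts, gate.wg, fc1/fc2, ...) to the HF NllbMoe layout.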
_snake_case : Any = {}
for old_key in state_dict.keys():
_snake_case : Union[str, Any] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
_snake_case : Union[str, Any] = key.replace('moe_layer.experts.0' , F"""ffn.experts.expert_{expert_idx}""" )
else:
_snake_case : Dict = key.replace('moe_layer.experts.' , 'ffn.experts.expert_' )
if "gate" in key:
_snake_case : Optional[int] = key.replace('.moe_layer.gate.wg' , '.ffn.router.classifier' )
if "fc2" and "experts" not in key:
_snake_case : int = key.replace('.fc2.' , '.ffn.fc2.' )
if "fc1" and "experts" not in key:
_snake_case : Union[str, Any] = key.replace('.fc1.' , '.ffn.fc1.' )
if ".encoder_attn." in key:
_snake_case : List[Any] = key.replace('.encoder_attn.' , '.cross_attention.' )
if "encoder_attn_layer_norm" in key:
_snake_case : Optional[Any] = key.replace('encoder_attn_layer_norm' , 'cross_attention_layer_norm' )
if "final_layer_norm" in key:
_snake_case : Optional[int] = key.replace('final_layer_norm' , 'ff_layer_norm' )
_snake_case : Optional[int] = state_dict[old_key]
return new_dict
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Tuple , lowerCAmelCase: List[Any] , lowerCAmelCase: str = WEIGHTS_NAME )-> List[Any]:
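    # Split the per-expert fairseq checkpoints into HF-style weight shards and build the accompanying index file.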
_snake_case : Union[str, Any] = []
_snake_case : Union[str, Any] = 0
os.makedirs(__snake_case , exist_ok=__snake_case )
for expert in range(__snake_case ):
_snake_case : Optional[Any] = switch_checkpoint_path + F"""-rank-{expert}.pt"""
if os.path.isfile(__snake_case ):
_snake_case : Optional[int] = torch.load(__snake_case )["model"]
remove_ignore_keys_(__snake_case )
_snake_case : Dict = rename_fairseq_keys(__snake_case , __snake_case )
_snake_case : Optional[int] = os.path.join(
__snake_case , weights_name.replace('.bin' , F"""-{len(__snake_case )+1:05d}-of-???.bin""" ) )
torch.save(__snake_case , __snake_case )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__snake_case )[0]].dtype )
# Add the last block
_snake_case : List[Any] = os.path.join(__snake_case , weights_name.replace('.bin' , F"""-{len(__snake_case )+1:05d}-of-???.bin""" ) )
_snake_case : List[Any] = torch.load(switch_checkpoint_path + '-shared.pt' )["model"]
remove_ignore_keys_(__snake_case )
_snake_case : Optional[Any] = rename_fairseq_keys(__snake_case , __snake_case )
_snake_case : int = shared_weights["decoder.embed_tokens.weight"]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(__snake_case ) == 1:
_snake_case : int = os.path.join(__snake_case , __snake_case )
torch.save(__snake_case , __snake_case )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__snake_case , __snake_case )
# Otherwise, let's build the index
_snake_case : Any = {}
for idx, shard in enumerate(__snake_case ):
_snake_case : Optional[Any] = weights_name.replace('.bin' , F"""-{idx+1:05d}-of-{len(__snake_case ):05d}.bin""" )
_snake_case : List[Any] = os.path.join(__snake_case , weights_name.replace('.bin' , F"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(__snake_case , os.path.join(__snake_case , __snake_case ) )
for key in shard:
_snake_case : Optional[int] = shard_file
# Add the metadata
_snake_case : Tuple = {"total_size": total_size}
_snake_case : Optional[Any] = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(__snake_case , __snake_case ) , 'w' , encoding='utf-8' ) as f:
_snake_case : List[Any] = json.dumps(__snake_case , indent=2 , sort_keys=__snake_case ) + "\n"
f.write(__snake_case )
return metadata, index
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
lowerCAmelCase_ = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
lowerCAmelCase_ = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
| 713 |
# Function to print upper half of diamond (pyramid)
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] )-> List[str]:
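    # Row i gets (n - i - 1) leading spaces followed by (i + 1) stars, forming the upper half of the diamond.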
for i in range(0 , lowerCAmelCase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] )-> List[Any]:
for i in range(lowerCAmelCase , 0 , -1 ):
        for _ in range(i , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def lowerCamelCase_ ( lowerCAmelCase: Tuple )-> int:
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(lowerCAmelCase ) # upper half
reverse_floyd(lowerCAmelCase ) # lower half
if __name__ == "__main__":
print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
lowerCAmelCase_ = 1
while K:
lowerCAmelCase_ = int(input("""enter the number and , and see the magic : """))
print()
pretty_print(user_number)
lowerCAmelCase_ = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 669 | 0 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class _lowerCAmelCase ( __lowercase ):
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase : Tuple , UpperCamelCase : Dict , UpperCamelCase : List[Any] , UpperCamelCase : Tuple , ):
'''simple docstring'''
super().__init__()
_snake_case : Optional[int] = value_function
_snake_case : Any = unet
_snake_case : Optional[int] = scheduler
_snake_case : Dict = env
_snake_case : Union[str, Any] = env.get_dataset()
_snake_case : Union[str, Any] = {}
for key in self.data.keys():
try:
_snake_case : int = self.data[key].mean()
except: # noqa: E722
pass
_snake_case : Union[str, Any] = {}
for key in self.data.keys():
try:
_snake_case : Dict = self.data[key].std()
except: # noqa: E722
pass
_snake_case : Dict = env.observation_space.shape[0]
_snake_case : List[str] = env.action_space.shape[0]
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : int ):
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def UpperCamelCase_ ( self : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] ):
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
if type(__A ) is dict:
return {k: self.to_torch(__A ) for k, v in x_in.items()}
elif torch.is_tensor(__A ):
return x_in.to(self.unet.device )
return torch.tensor(__A , device=self.unet.device )
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
for key, val in cond.items():
_snake_case : List[Any] = val.clone()
return x_in
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any ):
'''simple docstring'''
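        # Value-guided reverse diffusion: before each denoising step, push the trajectory x up the gradient of the value model.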
_snake_case : Union[str, Any] = x.shape[0]
_snake_case : Tuple = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
_snake_case : str = torch.full((batch_size,) , __A , device=self.unet.device , dtype=torch.long )
for _ in range(__A ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
_snake_case : Union[str, Any] = self.value_function(x.permute(0 , 2 , 1 ) , __A ).sample
_snake_case : Tuple = torch.autograd.grad([y.sum()] , [x] )[0]
_snake_case : Union[str, Any] = self.scheduler._get_variance(__A )
_snake_case : Tuple = torch.exp(0.5 * posterior_variance )
_snake_case : Dict = model_std * grad
_snake_case : List[Any] = 0
_snake_case : int = x.detach()
_snake_case : Union[str, Any] = x + scale * grad
_snake_case : Dict = self.reset_xa(__A , __A , self.action_dim )
_snake_case : Optional[Any] = self.unet(x.permute(0 , 2 , 1 ) , __A ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
_snake_case : Any = self.scheduler.step(__A , __A , __A , predict_epsilon=__A )['prev_sample']
# apply conditions to the trajectory (set the initial state)
_snake_case : str = self.reset_xa(__A , __A , self.action_dim )
_snake_case : str = self.to_torch(__A )
return x, y
def __call__( self : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Dict=64 , UpperCamelCase : Union[str, Any]=32 , UpperCamelCase : List[Any]=2 , UpperCamelCase : Optional[Any]=0.1 ):
'''simple docstring'''
_snake_case : int = self.normalize(__A , 'observations' )
_snake_case : List[str] = obs[None].repeat(__A , axis=0 )
_snake_case : List[str] = {0: self.to_torch(__A )}
_snake_case : Optional[int] = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
_snake_case : Any = randn_tensor(__A , device=self.unet.device )
_snake_case : Tuple = self.reset_xa(__A , __A , self.action_dim )
_snake_case : List[str] = self.to_torch(__A )
# run the diffusion process
_snake_case , _snake_case : int = self.run_diffusion(__A , __A , __A , __A )
# sort output trajectories by value
_snake_case : str = y.argsort(0 , descending=__A ).squeeze()
_snake_case : List[str] = x[sorted_idx]
_snake_case : Optional[Any] = sorted_values[:, :, : self.action_dim]
_snake_case : Tuple = actions.detach().cpu().numpy()
_snake_case : List[str] = self.de_normalize(__A , key='actions' )
# select the action with the highest value
if y is not None:
_snake_case : Optional[int] = 0
else:
# if we didn't run value guiding, select a random action
_snake_case : Dict = np.random.randint(0 , __A )
_snake_case : Optional[int] = denorm_actions[selected_index, 0]
return denorm_actions
| 714 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Tuple ="""audio-spectrogram-transformer"""
def __init__( self : List[Any] , UpperCamelCase : Union[str, Any]=7_68 , UpperCamelCase : int=12 , UpperCamelCase : str=12 , UpperCamelCase : Tuple=30_72 , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : Any=0.0 , UpperCamelCase : Dict=0.0 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : Dict=1e-1_2 , UpperCamelCase : str=16 , UpperCamelCase : List[Any]=True , UpperCamelCase : Any=10 , UpperCamelCase : Optional[int]=10 , UpperCamelCase : int=10_24 , UpperCamelCase : Optional[Any]=1_28 , **UpperCamelCase : Optional[Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
_snake_case : Tuple = hidden_size
_snake_case : str = num_hidden_layers
_snake_case : Optional[Any] = num_attention_heads
_snake_case : Optional[Any] = intermediate_size
_snake_case : Optional[Any] = hidden_act
_snake_case : List[str] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Any = initializer_range
_snake_case : List[str] = layer_norm_eps
_snake_case : int = patch_size
_snake_case : List[str] = qkv_bias
_snake_case : int = frequency_stride
_snake_case : List[Any] = time_stride
_snake_case : List[Any] = max_length
_snake_case : List[str] = num_mel_bins
| 669 | 0 |
import math
def lowerCamelCase_ ( lowerCAmelCase: float , lowerCAmelCase: float )-> int:
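    # Real power P = apparent_power * power_factor (power factor must lie in [-1, 1]).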
if (
not isinstance(UpperCamelCase__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * power_factor
def lowerCamelCase_ ( lowerCAmelCase: float , lowerCAmelCase: float )-> Optional[int]:
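    # Reactive power Q = apparent_power * sqrt(1 - power_factor**2).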
if (
not isinstance(UpperCamelCase__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowerCamelCase_ ( lowerCAmelCase: Tuple , lowerCAmelCase: bool = True , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: bool = False , lowerCAmelCase: float = 1_00 , lowerCAmelCase: float = 0.0_1 , lowerCAmelCase: float = 1 , )-> Any:
_snake_case : int = False
_snake_case : Any = search_prob
_snake_case : Tuple = start_temperate
_snake_case : Any = []
_snake_case : List[str] = 0
_snake_case : Optional[Any] = None
while not search_end:
_snake_case : List[Any] = current_state.score()
if best_state is None or current_score > best_state.score():
_snake_case : Dict = current_state
scores.append(lowerCAmelCase )
iterations += 1
_snake_case : Optional[int] = None
_snake_case : Union[str, Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
_snake_case : Dict = random.randint(0 , len(lowerCAmelCase ) - 1 ) # picking a random neighbor
_snake_case : int = neighbors.pop(lowerCAmelCase )
_snake_case : Union[str, Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_snake_case : Union[str, Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_snake_case : Union[str, Any] = picked_neighbor
else:
_snake_case : Optional[Any] = (math.e) ** (
change / current_temp
                ) # Metropolis acceptance probability e^(change / current_temp)
if random.random() < probability: # random number within probability
_snake_case : int = picked_neighbor
_snake_case : List[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_snake_case : List[str] = True
else:
_snake_case : Union[str, Any] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowerCAmelCase ) , lowerCAmelCase )
plt.xlabel('Iterations' )
plt.ylabel('Function values' )
plt.show()
return best_state
if __name__ == "__main__":
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: List[Any] )-> List[Any]:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Dict )-> Dict:
return (3 * x**2) - (6 * y)
lowerCAmelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
lowerCAmelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
| 669 | 0 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
lowerCAmelCase_ = HfApi()
lowerCAmelCase_ = {}
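# Expected slices of the first 30 output logits (logits[0, 0, 0, :30]) for each checkpoint, keyed by model id.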
# fmt: off
lowerCAmelCase_ = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
lowerCAmelCase_ = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
lowerCAmelCase_ = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
lowerCAmelCase_ = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
lowerCAmelCase_ = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
lowerCAmelCase_ = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
lowerCAmelCase_ = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
lowerCAmelCase_ = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
lowerCAmelCase_ = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
lowerCAmelCase_ = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
lowerCAmelCase_ = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
lowerCAmelCase_ = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
lowerCAmelCase_ = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
lowerCAmelCase_ = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
lowerCAmelCase_ = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
lowerCAmelCase_ = api.list_models(filter="""diffusers""")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        lowerCAmelCase_ = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1]
print(F"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith("""CompVis"""):
lowerCAmelCase_ = UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""")
else:
lowerCAmelCase_ = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
lowerCAmelCase_ = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
lowerCAmelCase_ = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
lowerCAmelCase_ = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3
)
print(F"""{mod.modelId} has passed successfully!!!""")
| 716 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : torch.FloatTensor
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self : str , UpperCamelCase : int = 32 , UpperCamelCase : int = 64 , UpperCamelCase : int = 20 , UpperCamelCase : int = 7_68 , UpperCamelCase : Optional[int]=77 , UpperCamelCase : int=4 , UpperCamelCase : float = 0.0 , UpperCamelCase : str = "silu" , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = "linear" , UpperCamelCase : Optional[str] = "prd" , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
_snake_case : str = num_attention_heads
_snake_case : Optional[int] = attention_head_dim
_snake_case : Any = num_attention_heads * attention_head_dim
_snake_case : List[Any] = additional_embeddings
_snake_case : List[str] = time_embed_dim or inner_dim
_snake_case : int = embedding_proj_dim or embedding_dim
_snake_case : List[Any] = clip_embed_dim or embedding_dim
_snake_case : Optional[Any] = Timesteps(UpperCamelCase , UpperCamelCase , 0 )
_snake_case : List[Any] = TimestepEmbedding(UpperCamelCase , UpperCamelCase , out_dim=UpperCamelCase , act_fn=UpperCamelCase )
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
if embedding_proj_norm_type is None:
_snake_case : str = None
elif embedding_proj_norm_type == "layer":
_snake_case : List[Any] = nn.LayerNorm(UpperCamelCase )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_snake_case : str = nn.Linear(UpperCamelCase , UpperCamelCase )
if encoder_hid_proj_type is None:
_snake_case : Any = None
elif encoder_hid_proj_type == "linear":
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase ) )
if added_emb_type == "prd":
_snake_case : str = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase ) )
elif added_emb_type is None:
_snake_case : Dict = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_snake_case : Optional[int] = nn.ModuleList(
[
BasicTransformerBlock(
UpperCamelCase , UpperCamelCase , UpperCamelCase , dropout=UpperCamelCase , activation_fn='gelu' , attention_bias=UpperCamelCase , )
for d in range(UpperCamelCase )
] )
if norm_in_type == "layer":
_snake_case : Optional[int] = nn.LayerNorm(UpperCamelCase )
elif norm_in_type is None:
_snake_case : Optional[Any] = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
_snake_case : Optional[Any] = nn.LayerNorm(UpperCamelCase )
_snake_case : Union[str, Any] = nn.Linear(UpperCamelCase , UpperCamelCase )
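        # Additive causal mask: -10000.0 above the diagonal so each token only attends to itself and earlier positions.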
_snake_case : List[Any] = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
_snake_case : Optional[Any] = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , UpperCamelCase , persistent=UpperCamelCase )
_snake_case : str = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = {}
def fn_recursive_add_processors(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Dict[str, AttentionProcessor] ):
if hasattr(UpperCamelCase , 'set_processor' ):
_snake_case : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return processors
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
'''simple docstring'''
_snake_case : Optional[int] = len(self.attn_processors.keys() )
if isinstance(UpperCamelCase , UpperCamelCase ) and len(UpperCamelCase ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(UpperCamelCase )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Union[str, Any] ):
if hasattr(UpperCamelCase , 'set_processor' ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
module.set_processor(UpperCamelCase )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Union[torch.Tensor, float, int] , UpperCamelCase : torch.FloatTensor , UpperCamelCase : Optional[torch.FloatTensor] = None , UpperCamelCase : Optional[torch.BoolTensor] = None , UpperCamelCase : bool = True , ):
'''simple docstring'''
_snake_case : Dict = hidden_states.shape[0]
_snake_case : str = timestep
if not torch.is_tensor(UpperCamelCase ):
_snake_case : Dict = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(UpperCamelCase ) and len(timesteps.shape ) == 0:
_snake_case : Tuple = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_snake_case : Optional[int] = timesteps * torch.ones(UpperCamelCase , dtype=timesteps.dtype , device=timesteps.device )
_snake_case : Union[str, Any] = self.time_proj(UpperCamelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_snake_case : Tuple = timesteps_projected.to(dtype=self.dtype )
_snake_case : List[Any] = self.time_embedding(UpperCamelCase )
if self.embedding_proj_norm is not None:
_snake_case : Optional[Any] = self.embedding_proj_norm(UpperCamelCase )
_snake_case : Union[str, Any] = self.embedding_proj(UpperCamelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_snake_case : Dict = self.encoder_hidden_states_proj(UpperCamelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_snake_case : str = self.proj_in(UpperCamelCase )
_snake_case : int = self.positional_embedding.to(hidden_states.dtype )
_snake_case : Optional[int] = []
_snake_case : List[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(UpperCamelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_snake_case : str = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_snake_case : str = hidden_states[:, None, :]
_snake_case : str = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_snake_case : int = self.prd_embedding.to(hidden_states.dtype ).expand(UpperCamelCase , -1 , -1 )
additional_embeds.append(UpperCamelCase )
_snake_case : Optional[int] = torch.cat(
UpperCamelCase , dim=1 , )
        # Allow positional_embedding not to include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_snake_case : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_snake_case : Optional[Any] = F.pad(
UpperCamelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_snake_case : Optional[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
_snake_case : Any = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
_snake_case : Tuple = F.pad(UpperCamelCase , (0, self.additional_embeddings) , value=0.0 )
_snake_case : int = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_snake_case : str = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_snake_case : Tuple = self.norm_in(UpperCamelCase )
for block in self.transformer_blocks:
_snake_case : Any = block(UpperCamelCase , attention_mask=UpperCamelCase )
_snake_case : Dict = self.norm_out(UpperCamelCase )
if self.prd_embedding is not None:
_snake_case : str = hidden_states[:, -1]
else:
_snake_case : Any = hidden_states[:, additional_embeddings_len:]
_snake_case : List[Any] = self.proj_to_clip_embeddings(UpperCamelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
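# Hedged illustration, separate from the class above: the forward pass turns a
# boolean attention mask into an additive bias (0.0 keeps a token, -10000.0 masks
# it), pads it to cover the extra learned tokens, and repeats it once per head.
# The sequence length, the 2 extra tokens, and the 4 heads below are assumptions
# made only for this demo, not values taken from the model.
def _demo_additive_attention_bias():
    import torch
    import torch.nn.functional as F

    mask = torch.tensor([[1.0, 1.0, 0.0]])  # 1 = attend, 0 = mask
    bias = (1.0 - mask) * -10000.0  # additive bias, same constant as above
    bias = F.pad(bias, (0, 2), value=0.0)  # room for 2 additional tokens
    return bias[:, None, :].repeat_interleave(4, dim=0)  # one copy per head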
| 669 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class _lowerCAmelCase ( UpperCamelCase_ ):
'''simple docstring'''
a_ : str ="""gpt_neo"""
a_ : List[str] =["""past_key_values"""]
a_ : Dict ={"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self : List[str] , UpperCamelCase : Dict=5_02_57 , UpperCamelCase : List[str]=20_48 , UpperCamelCase : Tuple=20_48 , UpperCamelCase : Optional[Any]=24 , UpperCamelCase : List[Any]=[[["global", "local"], 12]] , UpperCamelCase : str=16 , UpperCamelCase : Optional[int]=None , UpperCamelCase : Tuple=2_56 , UpperCamelCase : Union[str, Any]="gelu_new" , UpperCamelCase : int=0.0 , UpperCamelCase : Optional[Any]=0.0 , UpperCamelCase : int=0.0 , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Dict=1e-5 , UpperCamelCase : Any=0.02 , UpperCamelCase : Dict=True , UpperCamelCase : Optional[Any]=5_02_56 , UpperCamelCase : Optional[Any]=5_02_56 , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
_snake_case : Tuple = vocab_size
_snake_case : Tuple = max_position_embeddings
_snake_case : List[Any] = hidden_size
_snake_case : int = num_layers
_snake_case : Dict = num_heads
_snake_case : Optional[int] = intermediate_size
_snake_case : List[Any] = window_size
_snake_case : Optional[Any] = activation_function
_snake_case : str = resid_dropout
_snake_case : str = embed_dropout
_snake_case : Union[str, Any] = attention_dropout
_snake_case : Any = classifier_dropout
_snake_case : Tuple = layer_norm_epsilon
_snake_case : Any = initializer_range
_snake_case : Optional[int] = use_cache
_snake_case : List[str] = bos_token_id
_snake_case : int = eos_token_id
_snake_case : Any = attention_types
_snake_case : Tuple = self.expand_attention_types_params(__a )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.attention_layers)` == `config.num_layers` '
f"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """
f"""`config.num_layers = {self.num_layers}`. """
'`config.attention_layers` is prepared using `config.attention_types`. '
'Please verify the value of `config.attention_types` argument.' )
super().__init__(bos_token_id=__a , eos_token_id=__a , **__a )
@staticmethod
def UpperCamelCase_ ( UpperCamelCase : Tuple ):
'''simple docstring'''
_snake_case : List[str] = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Tuple , lowerCAmelCase: Dict , lowerCAmelCase: List[str] )-> Optional[int]:
import torch
_snake_case : str = input.size()
_snake_case : str = len(lowerCAmelCase )
_snake_case : List[Any] = shape[dimension]
_snake_case : int = torch.arange(0 , lowerCAmelCase , lowerCAmelCase )
_snake_case : List[Any] = torch.div(sizedim - size , lowerCAmelCase , rounding_mode='floor' ) + 1
_snake_case : Dict = torch.arange(lowerCAmelCase ) + low_indices[:min_length][:, None]
_snake_case : int = [slice(lowerCAmelCase )] * rank
_snake_case : Dict = indices
_snake_case : List[str] = input[s]
_snake_case : Tuple = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Tuple , lowerCAmelCase: List[Any] )-> Union[str, Any]:
import torch
_snake_case : Tuple = torch.arange(1 , lowerCAmelCase )
_snake_case : Tuple = torch.remainder(lowerCAmelCase , lowerCAmelCase )
_snake_case : Optional[Any] = remainders == 0
_snake_case : Optional[int] = candidates[divisor_indices]
_snake_case : Tuple = torch.max(lowerCAmelCase )
return largest_divisor, torch.div(lowerCAmelCase , lowerCAmelCase , rounding_mode='floor' )
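# Hedged sketch of what the first helper above computes: strided, possibly
# overlapping windows along one dimension, the same effect as
# torch.Tensor.unfold. The sizes below are arbitrary demo values.
def _demo_sliding_windows():
    import torch

    x = torch.arange(8)
    # shape (3, 4): rows [0..3], [2..5], [4..7]
    return x.unfold(dimension=0, size=4, step=2)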
class _lowerCAmelCase ( UpperCamelCase_ ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : str = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(__a , direction='inputs' )
_snake_case : List[str] = {0: 'batch', 1: 'past_sequence + sequence'}
else:
_snake_case : Dict = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
return self._config.num_heads
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Tuple , UpperCamelCase : str = -1 , UpperCamelCase : Any = -1 , UpperCamelCase : Tuple = False , UpperCamelCase : str = None , ):
'''simple docstring'''
_snake_case : int = super(__a , self ).generate_dummy_inputs(
__a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a )
        # We need to order the inputs in the way they appear in the forward()
_snake_case : str = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_snake_case : Optional[Any] = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_snake_case : Optional[Any] = seqlen + 2
_snake_case : str = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
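                # per-layer shape: (batch, num_heads, past_sequence_length, hidden_size // num_heads)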
_snake_case : List[Any] = [
(torch.zeros(__a ), torch.zeros(__a )) for _ in range(self.num_layers )
]
_snake_case : Any = common_inputs['attention_mask']
if self.use_past:
_snake_case : Any = ordered_inputs['attention_mask'].dtype
_snake_case : Optional[Any] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(__a , __a , dtype=__a )] , dim=1 )
return ordered_inputs
@property
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return 13
| 717 |
def lowerCamelCase_ ( lowerCAmelCase: int )-> int:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Union[str, Any] = F"""Input value of [number={number}] must be an integer"""
raise TypeError(lowerCAmelCase )
if number < 1:
_snake_case : int = F"""Input value of [number={number}] must be > 0"""
raise ValueError(lowerCAmelCase )
_snake_case : int = 1
for i in range(1 , lowerCAmelCase ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
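# Hedged note: the loop above applies the Catalan-number recurrence
# C(i) = C(i-1) * (4 * i - 2) // (i + 1), so the function returns the
# (n - 1)-th Catalan number for input n; e.g. an input of 5 yields C(4) = 14.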
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 | 0 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Optional[int] ="""efficientformer"""
def __init__( self : Dict , UpperCamelCase : List[int] = [3, 2, 6, 4] , UpperCamelCase : List[int] = [48, 96, 2_24, 4_48] , UpperCamelCase : List[bool] = [True, True, True, True] , UpperCamelCase : int = 4_48 , UpperCamelCase : int = 32 , UpperCamelCase : int = 4 , UpperCamelCase : int = 7 , UpperCamelCase : int = 5 , UpperCamelCase : int = 8 , UpperCamelCase : int = 4 , UpperCamelCase : float = 0.0 , UpperCamelCase : int = 16 , UpperCamelCase : int = 3 , UpperCamelCase : int = 3 , UpperCamelCase : int = 3 , UpperCamelCase : int = 2 , UpperCamelCase : int = 1 , UpperCamelCase : float = 0.0 , UpperCamelCase : int = 1 , UpperCamelCase : bool = True , UpperCamelCase : bool = True , UpperCamelCase : float = 1e-5 , UpperCamelCase : str = "gelu" , UpperCamelCase : float = 0.02 , UpperCamelCase : float = 1e-1_2 , UpperCamelCase : int = 2_24 , UpperCamelCase : float = 1e-0_5 , **UpperCamelCase : int , ) -> None:
'''simple docstring'''
super().__init__(**__A )
_snake_case : int = hidden_act
_snake_case : List[str] = hidden_dropout_prob
_snake_case : Dict = hidden_sizes
_snake_case : List[str] = num_hidden_layers
_snake_case : Tuple = num_attention_heads
_snake_case : Optional[int] = initializer_range
_snake_case : Optional[int] = layer_norm_eps
_snake_case : Dict = patch_size
_snake_case : Dict = num_channels
_snake_case : Optional[Any] = depths
_snake_case : str = mlp_expansion_ratio
_snake_case : Any = downsamples
_snake_case : List[str] = dim
_snake_case : Union[str, Any] = key_dim
_snake_case : str = attention_ratio
_snake_case : Any = resolution
_snake_case : Any = pool_size
_snake_case : int = downsample_patch_size
_snake_case : List[str] = downsample_stride
_snake_case : str = downsample_pad
_snake_case : Dict = drop_path_rate
_snake_case : Union[str, Any] = num_metaad_blocks
_snake_case : List[str] = distillation
_snake_case : Optional[int] = use_layer_scale
_snake_case : List[Any] = layer_scale_init_value
_snake_case : Dict = image_size
_snake_case : Optional[int] = batch_norm_eps
| 718 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[Any] =VOCAB_FILES_NAMES
a_ : Tuple =PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[Any] =PRETRAINED_INIT_CONFIGURATION
a_ : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Any =LxmertTokenizer
def __init__( self : Any , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Dict=None , UpperCamelCase : List[str]=True , UpperCamelCase : List[str]="[UNK]" , UpperCamelCase : List[Any]="[SEP]" , UpperCamelCase : List[Any]="[PAD]" , UpperCamelCase : Optional[Any]="[CLS]" , UpperCamelCase : Optional[int]="[MASK]" , UpperCamelCase : Optional[int]=True , UpperCamelCase : str=None , **UpperCamelCase : List[str] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
_snake_case : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
_snake_case : List[Any] = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
_snake_case : Optional[int] = do_lower_case
_snake_case : Dict = strip_accents
_snake_case : Optional[int] = tokenize_chinese_chars
_snake_case : Optional[Any] = normalizer_class(**UpperCamelCase )
_snake_case : int = do_lower_case
def UpperCamelCase_ ( self : int , UpperCamelCase : List[str] , UpperCamelCase : str=None ):
'''simple docstring'''
_snake_case : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Tuple = [self.sep_token_id]
_snake_case : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
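    # Hedged illustration of the segment layout built above for a hypothetical
    # sequence pair A, B:
    #   tokens:   [CLS] A ... A [SEP] B ... B [SEP]
    #   type ids:   0   0 ... 0   0   1 ... 1   1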
def UpperCamelCase_ ( self : int , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : int = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
| 669 | 0 |
def lowerCamelCase_ ( lowerCAmelCase: float , lowerCAmelCase: int )-> Dict:
if digit_amount > 0:
return round(number - int(_lowerCamelCase ) , _lowerCamelCase )
return number - int(_lowerCamelCase )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 719 |
from __future__ import annotations
from random import random
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase : int | None = None ):
'''simple docstring'''
_snake_case : str = value
_snake_case : List[Any] = random()
_snake_case : Node | None = None
_snake_case : Node | None = None
def __repr__( self : Optional[Any] ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return f"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = str(self.value ) + ' '
_snake_case : List[Any] = str(self.left or '' )
_snake_case : int = str(self.right or '' )
return value + left + right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> tuple[Node | None, Node | None]:
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
_snake_case , _snake_case : Optional[Any] = split(root.left , lowerCAmelCase )
return left, root
else:
_snake_case , _snake_case : List[str] = split(root.right , lowerCAmelCase )
return root, right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: Node | None )-> Node | None:
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
_snake_case : str = merge(left.right , lowerCAmelCase )
return left
else:
_snake_case : Union[str, Any] = merge(lowerCAmelCase , right.left )
return right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> Node | None:
_snake_case : Tuple = Node(lowerCAmelCase )
_snake_case , _snake_case : Optional[int] = split(lowerCAmelCase , lowerCAmelCase )
return merge(merge(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> Node | None:
_snake_case , _snake_case : Optional[int] = split(lowerCAmelCase , value - 1 )
_snake_case , _snake_case : List[str] = split(lowerCAmelCase , lowerCAmelCase )
return merge(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Node | None )-> None:
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=',' )
inorder(root.right )
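# Minimal usage sketch (keys are arbitrary demo values), assuming the functions
# above are bound to their conventional names insert() and inorder(), as the
# interactive loop below assumes:
def _demo_treap() -> None:
    root: Node | None = None
    for key in (5, 1, 3, 4, 2):
        root = insert(root, key)
    inorder(root)  # prints 1,2,3,4,5,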
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: str )-> Node | None:
for arg in args.split():
if arg[0] == "+":
_snake_case : List[str] = insert(lowerCAmelCase , int(arg[1:] ) )
elif arg[0] == "-":
_snake_case : Any = erase(lowerCAmelCase , int(arg[1:] ) )
else:
print('Unknown command' )
return root
def lowerCamelCase_ ( )-> None:
_snake_case : Tuple = None
print(
'enter numbers to create a tree, + value to add value into treap, '
'- value to erase all nodes with value. \'q\' to quit. ' )
_snake_case : List[Any] = input()
while args != "q":
_snake_case : int = interact_treap(lowerCAmelCase , lowerCAmelCase )
print(lowerCAmelCase )
_snake_case : Tuple = input()
    print('good bye!' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 669 | 0 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Any =MobileBertTokenizer
a_ : Any =MobileBertTokenizerFast
a_ : int =True
a_ : int =True
a_ : Dict =filter_non_english
a_ : Union[str, Any] ='''google/mobilebert-uncased'''
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
super().setUp()
_snake_case : Optional[int] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
_snake_case : Any = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : int ):
'''simple docstring'''
_snake_case : int = """UNwant\u00E9d,running"""
_snake_case : List[str] = """unwanted, running"""
return input_text, output_text
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Tuple = self.tokenizer_class(self.vocab_file )
_snake_case : str = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(A__ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , [9, 6, 7, 12, 10, 11] )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_snake_case : List[str] = self.get_tokenizer()
_snake_case : int = self.get_rust_tokenizer()
_snake_case : Optional[Any] = """UNwant\u00E9d,running"""
_snake_case : Optional[Any] = tokenizer.tokenize(A__ )
_snake_case : str = rust_tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
_snake_case : List[str] = tokenizer.encode(A__ , add_special_tokens=A__ )
_snake_case : Dict = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertListEqual(A__ , A__ )
_snake_case : Optional[Any] = self.get_rust_tokenizer()
_snake_case : Optional[Any] = tokenizer.encode(A__ )
_snake_case : List[str] = rust_tokenizer.encode(A__ )
self.assertListEqual(A__ , A__ )
# With lower casing
_snake_case : List[str] = self.get_tokenizer(do_lower_case=A__ )
_snake_case : List[str] = self.get_rust_tokenizer(do_lower_case=A__ )
_snake_case : Dict = """UNwant\u00E9d,running"""
_snake_case : str = tokenizer.tokenize(A__ )
_snake_case : int = rust_tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
_snake_case : str = tokenizer.encode(A__ , add_special_tokens=A__ )
_snake_case : int = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertListEqual(A__ , A__ )
_snake_case : List[Any] = self.get_rust_tokenizer()
_snake_case : Optional[Any] = tokenizer.encode(A__ )
_snake_case : Optional[int] = rust_tokenizer.encode(A__ )
self.assertListEqual(A__ , A__ )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : str = BasicTokenizer(do_lower_case=A__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Optional[int] = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Dict = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Tuple = BasicTokenizer(do_lower_case=A__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Dict = BasicTokenizer(do_lower_case=A__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = BasicTokenizer(do_lower_case=A__ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
_snake_case : Tuple = {}
for i, token in enumerate(A__ ):
_snake_case : int = i
_snake_case : Dict = WordpieceTokenizer(vocab=A__ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : int = self.get_tokenizer()
_snake_case : int = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : int = self.tokenizer_class.from_pretrained('google/mobilebert-uncased' )
_snake_case : List[Any] = tokenizer.encode('sequence builders' , add_special_tokens=A__ )
_snake_case : Optional[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=A__ )
_snake_case : List[Any] = tokenizer.build_inputs_with_special_tokens(A__ )
_snake_case : Dict = tokenizer.build_inputs_with_special_tokens(A__ , A__ )
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : int = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
_snake_case : int = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
_snake_case : str = tokenizer_r.encode_plus(
A__ , return_attention_mask=A__ , return_token_type_ids=A__ , return_offsets_mapping=A__ , add_special_tokens=A__ , )
_snake_case : Optional[int] = tokenizer_r.do_lower_case if hasattr(A__ , 'do_lower_case' ) else False
_snake_case : Optional[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : Any = ["""的""", """人""", """有"""]
_snake_case : Union[str, Any] = """""".join(A__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : Tuple = True
_snake_case : List[Any] = self.tokenizer_class.from_pretrained(A__ , **A__ )
_snake_case : Optional[int] = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
_snake_case : Dict = tokenizer_p.encode(A__ , add_special_tokens=A__ )
_snake_case : str = tokenizer_r.encode(A__ , add_special_tokens=A__ )
_snake_case : int = tokenizer_r.convert_ids_to_tokens(A__ )
_snake_case : int = tokenizer_p.convert_ids_to_tokens(A__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A__ , A__ )
self.assertListEqual(A__ , A__ )
_snake_case : str = False
_snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
_snake_case : Optional[Any] = self.tokenizer_class.from_pretrained(A__ , **A__ )
_snake_case : Union[str, Any] = tokenizer_r.encode(A__ , add_special_tokens=A__ )
_snake_case : Union[str, Any] = tokenizer_p.encode(A__ , add_special_tokens=A__ )
_snake_case : Optional[Any] = tokenizer_r.convert_ids_to_tokens(A__ )
_snake_case : Any = tokenizer_p.convert_ids_to_tokens(A__ )
# it is expected that only the first Chinese character is not preceded by "##".
_snake_case : Optional[int] = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(A__ )
]
self.assertListEqual(A__ , A__ )
self.assertListEqual(A__ , A__ )
| 720 |
from functools import reduce
lowerCAmelCase_ = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def lowerCamelCase_ ( lowerCAmelCase: str = N )-> int:
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda lowerCAmelCase , lowerCAmelCase : str(int(lowerCAmelCase ) * int(lowerCAmelCase ) ) , n[i : i + 13] ) )
for i in range(len(lowerCAmelCase ) - 12 ) )
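# Equivalent hedged sketch without functools.reduce: math.prod over the same
# 13-digit windows (pass in the 1000-digit string above):
def _solution_with_prod(n: str) -> int:
    from math import prod

    return max(prod(int(digit) for digit in n[i : i + 13]) for i in range(len(n) - 12))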
if __name__ == "__main__":
print(F"""{solution() = }""")
| 669 | 0 |
lowerCAmelCase_ = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
lowerCAmelCase_ = {value: key for key, value in MORSE_CODE_DICT.items()}
def lowerCamelCase_ ( lowerCAmelCase: str )-> str:
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def lowerCamelCase_ ( lowerCAmelCase: str )-> str:
return "".join(REVERSE_DICT[char] for char in message.split() )
def lowerCamelCase_ ( )-> None:
_snake_case : Tuple = 'Morse code here!'
print(lowerCAmelCase__ )
_snake_case : Optional[int] = encrypt(lowerCAmelCase__ )
print(lowerCAmelCase__ )
_snake_case : Dict = decrypt(lowerCAmelCase__ )
print(lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 721 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowerCamelCase_ ( )-> Any:
_snake_case : List[str] = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
}
_snake_case : Optional[Any] = Dataset.from_dict(lowerCAmelCase )
return dataset
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Union[str, Any] = get_dataset()
_snake_case : Tuple = make_duplicate_clusters(UpperCamelCase , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = get_dataset()
_snake_case , _snake_case : str = deduplicate_dataset(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , 2 )
print(UpperCamelCase )
self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , UpperCamelCase )
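# Note on the fixture above (an assumption about the test data, not verified
# against the deduplication implementation): "a " * 20 and "a " * 30 yield
# near-identical shingle sets, so at a Jaccard threshold of 0.85 they fall into
# one duplicate cluster, while "b " * 7 stays out.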
| 669 | 0 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase : List[str] , UpperCamelCase : int=13 , UpperCamelCase : Dict=30 , UpperCamelCase : Optional[int]=2 , UpperCamelCase : Dict=3 , UpperCamelCase : Any=True , UpperCamelCase : List[Any]=True , UpperCamelCase : Tuple=32 , UpperCamelCase : Dict=2 , UpperCamelCase : Optional[Any]=4 , UpperCamelCase : List[str]=37 , UpperCamelCase : Optional[int]="gelu" , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : int=0.1 , UpperCamelCase : Optional[int]=10 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : Tuple=3 , UpperCamelCase : List[Any]=None , ):
'''simple docstring'''
_snake_case : Optional[int] = parent
_snake_case : Union[str, Any] = batch_size
_snake_case : Optional[int] = image_size
_snake_case : int = patch_size
_snake_case : Union[str, Any] = num_channels
_snake_case : List[Any] = is_training
_snake_case : int = use_labels
_snake_case : Optional[int] = hidden_size
_snake_case : Optional[Any] = num_hidden_layers
_snake_case : Optional[Any] = num_attention_heads
_snake_case : Tuple = intermediate_size
_snake_case : Dict = hidden_act
_snake_case : Union[str, Any] = hidden_dropout_prob
_snake_case : List[Any] = attention_probs_dropout_prob
_snake_case : Tuple = type_sequence_label_size
_snake_case : Optional[int] = initializer_range
_snake_case : Tuple = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_snake_case : str = (image_size // patch_size) ** 2
_snake_case : Tuple = num_patches + 1
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : int = None
if self.use_labels:
_snake_case : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_snake_case : str = TFViTModel(config=UpperCamelCase__ )
_snake_case : Optional[int] = model(UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image of a different size than the one specified in the config.
_snake_case : str = self.image_size // 2
_snake_case : Union[str, Any] = pixel_values[:, :, :image_size, :image_size]
_snake_case : int = model(UpperCamelCase__ , interpolate_pos_encoding=UpperCamelCase__ , training=UpperCamelCase__ )
_snake_case : Union[str, Any] = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : int , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.type_sequence_label_size
_snake_case : Optional[int] = TFViTForImageClassification(UpperCamelCase__ )
_snake_case : Optional[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image of a different size than the one specified in the config.
_snake_case : Tuple = self.image_size // 2
_snake_case : List[str] = pixel_values[:, :, :image_size, :image_size]
_snake_case : Union[str, Any] = model(UpperCamelCase__ , interpolate_pos_encoding=UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case : Optional[int] = 1
_snake_case : str = TFViTForImageClassification(UpperCamelCase__ )
_snake_case : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case : List[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : List[Any] = config_and_inputs
_snake_case : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : str =(TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
a_ : Tuple =(
{"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
if is_tf_available()
else {}
)
a_ : List[str] =False
a_ : Optional[int] =False
a_ : Any =False
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : int = TFViTModelTester(self )
_snake_case : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason='ViT does not use inputs_embeds' )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case , _snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : str = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_snake_case : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , tf.keras.layers.Layer ) )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[Any] = model_class(UpperCamelCase__ )
_snake_case : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : List[Any] = [*signature.parameters.keys()]
_snake_case : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(UpperCamelCase__ )
def lowerCamelCase_ ( )-> int:
_snake_case : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Dict = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
_snake_case : str = self.default_image_processor
_snake_case : str = prepare_img()
_snake_case : Tuple = image_processor(images=UpperCamelCase__ , return_tensors='tf' )
# forward pass
_snake_case : Optional[int] = model(**UpperCamelCase__ )
# verify the logits
_snake_case : int = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
_snake_case : Dict = tf.constant([-0.27_44, 0.82_15, -0.08_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 )
| 700 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] =["""image_processor""", """tokenizer"""]
a_ : Optional[int] ="""CLIPImageProcessor"""
a_ : Optional[Any] =("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[Any]=None , **UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : int = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase , )
_snake_case : Optional[Any] = kwargs.pop('feature_extractor' )
_snake_case : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCamelCase , UpperCamelCase )
def __call__( self : Dict , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Dict ):
'''simple docstring'''
if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be None.' )
if text is not None:
_snake_case : Optional[int] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if images is not None:
_snake_case : Optional[int] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is not None and images is not None:
_snake_case : Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Any = self.tokenizer.model_input_names
_snake_case : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
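# Hedged usage sketch for the processor above; the checkpoint name and inputs
# are placeholders, not verified identifiers:
#
#   processor = <ProcessorClass>.from_pretrained("some-checkpoint")
#   batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # `batch` then carries both `input_ids` and `pixel_values`.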
| 669 | 0 |
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
lowerCAmelCase_ = """sshleifer/bart-tiny-random"""
lowerCAmelCase_ = """patrickvonplaten/t5-tiny-random"""
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return AutoConfig.from_pretrained(UpperCamelCase )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case , *_snake_case : int = create_student_by_copying_alternating_layers(UpperCamelCase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case , *_snake_case : Tuple = create_student_by_copying_alternating_layers(UpperCamelCase , tempfile.mkdtemp() , e=1 , d=UpperCamelCase )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case , *_snake_case : List[Any] = create_student_by_copying_alternating_layers(UpperCamelCase , tempfile.mkdtemp() , e=1 , d=UpperCamelCase )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case , *_snake_case : Tuple = create_student_by_copying_alternating_layers(UpperCamelCase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
with self.assertRaises(UpperCamelCase ):
create_student_by_copying_alternating_layers(UpperCamelCase , tempfile.mkdtemp() , e=UpperCamelCase , d=UpperCamelCase )
| 701 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase_ = """http://www.mocksite.com/file1.txt"""
lowerCAmelCase_ = """\"text\": [\"foo\", \"foo\"]"""
lowerCAmelCase_ = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class _lowerCAmelCase :
'''simple docstring'''
a_ : int =200
a_ : List[str] ={"""Content-Length""": """100"""}
a_ : Tuple ={}
def UpperCamelCase_ ( self : Any , **UpperCamelCase : Any ):
'''simple docstring'''
return [bytes(UpperCamelCase , 'utf-8' )]
def lowerCamelCase_ ( *lowerCAmelCase: Tuple , **lowerCAmelCase: Tuple )-> str:
return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict )-> Optional[Any]:
import requests
monkeypatch.setattr(lowerCAmelCase , 'request' , lowerCAmelCase )
_snake_case : List[str] = URL
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[int] = url
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Any = [url]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': url}
_snake_case : int = 'dummy'
_snake_case : Optional[Any] = 'downloads'
_snake_case : Union[str, Any] = tmp_path
_snake_case : Dict = DownloadConfig(
cache_dir=os.path.join(lowerCAmelCase , lowerCAmelCase ) , use_etag=lowerCAmelCase , )
_snake_case : str = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Optional[int] = dl_manager.download(lowerCAmelCase )
_snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = [downloaded_paths]
_snake_case : List[str] = [urls]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in downloaded_paths.keys()
_snake_case : Any = downloaded_paths.values()
_snake_case : List[str] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowerCAmelCase , lowerCAmelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
_snake_case : str = Path(lowerCAmelCase )
_snake_case : int = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
_snake_case : List[str] = downloaded_path.read_text()
assert content == CONTENT
_snake_case : Any = downloaded_path.with_suffix('.json' )
assert metadata_downloaded_path.exists()
_snake_case : Tuple = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: Optional[int] , lowerCAmelCase: Any )-> str:
_snake_case : str = str(lowerCAmelCase )
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : str = filename
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[Any] = [filename]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': filename}
_snake_case : Any = 'dummy'
_snake_case : Union[str, Any] = xz_file.parent
_snake_case : int = 'extracted'
_snake_case : Union[str, Any] = DownloadConfig(
cache_dir=lowerCAmelCase , use_etag=lowerCAmelCase , )
_snake_case : List[str] = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Dict = dl_manager.extract(lowerCAmelCase )
_snake_case : Optional[int] = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[str] = [extracted_paths]
_snake_case : int = [paths]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in extracted_paths.keys()
_snake_case : Optional[int] = extracted_paths.values()
_snake_case : str = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowerCAmelCase , lowerCAmelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_snake_case : List[str] = Path(lowerCAmelCase )
_snake_case : Optional[Any] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowerCAmelCase , etag=lowerCAmelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_snake_case : Optional[int] = extracted_path.read_text()
_snake_case : int = text_file.read_text()
assert extracted_file_content == expected_file_content
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: List[Any] )-> Dict:
assert path.endswith('.jsonl' )
for num_items, line in enumerate(lowerCAmelCase , start=1 ):
_snake_case : Dict = json.loads(line.decode('utf-8' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: List[str] )-> Dict:
_snake_case : List[str] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: int )-> str:
_snake_case : List[Any] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[int] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_tar == 1
assert num_jsonl == 2
def lowerCamelCase_ ( lowerCAmelCase: Any )-> int:
_snake_case : Tuple = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowerCAmelCase ) , start=1 ):
assert os.path.basename(lowerCAmelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 669 | 0 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = Dict[str, Any]
lowerCAmelCase_ = List[Prediction]
@add_end_docstrings(_UpperCAmelCase )
class _lowerCAmelCase ( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self : str , *UpperCamelCase : int , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
super().__init__(*lowercase__ , **lowercase__ )
if self.framework == "tf":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , 'vision' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def UpperCamelCase_ ( self : Optional[int] , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : Any = {}
if "threshold" in kwargs:
_snake_case : Optional[int] = kwargs["threshold"]
return {}, {}, postprocess_kwargs
def __call__( self : int , *UpperCamelCase : int , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
return super().__call__(*lowercase__ , **lowercase__ )
def UpperCamelCase_ ( self : Any , UpperCamelCase : List[Any] ):
'''simple docstring'''
_snake_case : List[str] = load_image(lowercase__ )
_snake_case : Dict = torch.IntTensor([[image.height, image.width]] )
_snake_case : int = self.image_processor(images=[image] , return_tensors='pt' )
if self.tokenizer is not None:
_snake_case : List[str] = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt' )
_snake_case : List[Any] = target_size
return inputs
def UpperCamelCase_ ( self : Any , UpperCamelCase : Tuple ):
'''simple docstring'''
_snake_case : Dict = model_inputs.pop('target_size' )
_snake_case : int = self.model(**lowercase__ )
_snake_case : Dict = outputs.__class__({'target_size': target_size, **outputs} )
if self.tokenizer is not None:
_snake_case : str = model_inputs["bbox"]
return model_outputs
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any]=0.9 ):
'''simple docstring'''
_snake_case : Tuple = model_outputs["target_size"]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
_snake_case : Union[str, Any] = target_size[0].tolist()
def unnormalize(UpperCamelCase : Tuple ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 10_00),
(height * bbox[1] / 10_00),
(width * bbox[2] / 10_00),
(height * bbox[3] / 10_00),
] ) )
_snake_case : Union[str, Any] = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
_snake_case : Dict = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
_snake_case : List[Any] = [unnormalize(lowercase__ ) for bbox in model_outputs["bbox"].squeeze(0 )]
_snake_case : Dict = ["score", "label", "box"]
_snake_case : List[Any] = [dict(zip(lowercase__ , lowercase__ ) ) for vals in zip(scores.tolist() , lowercase__ , lowercase__ ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
_snake_case : Tuple = self.image_processor.post_process_object_detection(lowercase__ , lowercase__ , lowercase__ )
_snake_case : List[str] = raw_annotations[0]
_snake_case : Dict = raw_annotation["scores"]
_snake_case : Tuple = raw_annotation["labels"]
_snake_case : Dict = raw_annotation["boxes"]
_snake_case : Dict = scores.tolist()
_snake_case : Optional[Any] = [self.model.config.idalabel[label.item()] for label in labels]
_snake_case : List[Any] = [self._get_bounding_box(lowercase__ ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
_snake_case : str = ["score", "label", "box"]
_snake_case : Optional[Any] = [
dict(zip(lowercase__ , lowercase__ ) )
for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'] )
]
return annotation
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
if self.framework != "pt":
raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' )
_snake_case : str = box.int().tolist()
_snake_case : str = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
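# Minimal usage sketch, kept as comments because the pipeline class above is
# masked as _lowerCAmelCase; the checkpoint name below is an illustrative
# assumption, not something this file defines:
# from transformers import pipeline
# detector = pipeline("object-detection", model="facebook/detr-resnet-50")
# detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
# # -> [{"score": ..., "label": ..., "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]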
| 702 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : int ="""roberta"""
def __init__( self : int , UpperCamelCase : Tuple=5_02_65 , UpperCamelCase : Any=7_68 , UpperCamelCase : List[Any]=12 , UpperCamelCase : str=12 , UpperCamelCase : Dict=30_72 , UpperCamelCase : Any="gelu" , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : Optional[Any]=5_12 , UpperCamelCase : List[str]=2 , UpperCamelCase : Optional[Any]=0.02 , UpperCamelCase : Tuple=1e-1_2 , UpperCamelCase : str=1 , UpperCamelCase : int=0 , UpperCamelCase : Any=2 , UpperCamelCase : int="absolute" , UpperCamelCase : int=True , UpperCamelCase : List[Any]=None , **UpperCamelCase : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase )
_snake_case : Any = vocab_size
_snake_case : List[str] = hidden_size
_snake_case : List[str] = num_hidden_layers
_snake_case : Dict = num_attention_heads
_snake_case : List[str] = hidden_act
_snake_case : Union[str, Any] = intermediate_size
_snake_case : Union[str, Any] = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : Dict = max_position_embeddings
_snake_case : Optional[int] = type_vocab_size
_snake_case : Tuple = initializer_range
_snake_case : int = layer_norm_eps
_snake_case : Dict = position_embedding_type
_snake_case : Union[str, Any] = use_cache
_snake_case : str = classifier_dropout
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_snake_case : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
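# Usage sketch (comments only; both classes above are masked as _lowerCAmelCase
# but, judging by model_type="roberta" and the OnnxConfig base class, correspond
# to a RoBERTa config and its ONNX export config — the names below are assumptions):
# config = RobertaConfig(num_hidden_layers=6)
# onnx_config = RobertaOnnxConfig(config, task="default")
# print(onnx_config.inputs)  # OrderedDict mapping input_ids / attention_mask to dynamic axes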
| 669 | 0 |
from __future__ import annotations
def lowerCamelCase_ ( lowerCAmelCase: Dict )-> list:
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return []
    _snake_case , _snake_case : List[str] = min(SCREAMING_SNAKE_CASE_ ), max(SCREAMING_SNAKE_CASE_ )
_snake_case : Optional[int] = int(max_value - min_value ) + 1
_snake_case : list[list] = [[] for _ in range(SCREAMING_SNAKE_CASE_ )]
for i in my_list:
buckets[int(i - min_value )].append(SCREAMING_SNAKE_CASE_ )
return [v for bucket in buckets for v in sorted(SCREAMING_SNAKE_CASE_ )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
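    # Average cost is O(n + k) for k = max - min + 1 buckets; each bucket is
    # finished with the built-in sort, so one crowded bucket degrades the run
    # toward O(n log n).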
| 703 |
from random import randint, random
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: bool = False , lowerCAmelCase: bool = False , lowerCAmelCase: int = 5 , )-> list:
_snake_case : Dict = [[-1] * number_of_cells] # Create a highway without any car
_snake_case : List[str] = 0
_snake_case : List[str] = max(lowerCAmelCase , 0 )
while i < number_of_cells:
_snake_case : Optional[Any] = (
randint(0 , lowerCAmelCase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: int )-> int:
_snake_case : Dict = 0
_snake_case : Optional[Any] = highway_now[car_index + 1 :]
for cell in range(len(lowerCAmelCase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(lowerCAmelCase , -1 )
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: float , lowerCAmelCase: int )-> list:
_snake_case : List[Any] = len(lowerCAmelCase )
    # Before calculations, the highway is empty
_snake_case : List[Any] = [-1] * number_of_cells
for car_index in range(lowerCAmelCase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
_snake_case : int = min(highway_now[car_index] + 1 , lowerCAmelCase )
# Number of empty cell before the next car
_snake_case : Tuple = get_distance(lowerCAmelCase , lowerCAmelCase ) - 1
# We can't have the car causing an accident
_snake_case : Union[str, Any] = min(next_highway[car_index] , lowerCAmelCase )
if random() < probability:
# Randomly, a driver will slow down
_snake_case : int = max(next_highway[car_index] - 1 , 0 )
return next_highway
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: int , lowerCAmelCase: float , lowerCAmelCase: int )-> list:
_snake_case : Dict = len(highway[0] )
for i in range(lowerCAmelCase ):
_snake_case : Any = update(highway[i] , lowerCAmelCase , lowerCAmelCase )
_snake_case : Tuple = [-1] * number_of_cells
for car_index in range(lowerCAmelCase ):
_snake_case : Union[str, Any] = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
_snake_case : Union[str, Any] = (car_index + speed) % number_of_cells
# Commit the change of position
_snake_case : Tuple = speed
highway.append(lowerCAmelCase )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
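    # Example run (a sketch: the definitions above are masked as
    # lowerCamelCase_, so the function and keyword names here are assumptions):
    # highway = construct_highway(number_of_cells=21, frequency=2, initial_speed=3)
    # for state in simulate(highway, number_of_update=5, probability=0.3, max_speed=5):
    #     print(state)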
| 669 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _lowerCAmelCase ( __A ):
'''simple docstring'''
a_ : Optional[int] ="""unispeech"""
def __init__( self : List[str] , UpperCamelCase : Union[str, Any]=32 , UpperCamelCase : Optional[Any]=7_68 , UpperCamelCase : Dict=12 , UpperCamelCase : Optional[int]=12 , UpperCamelCase : Any=30_72 , UpperCamelCase : List[Any]="gelu" , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Any=0.1 , UpperCamelCase : Any=0.1 , UpperCamelCase : Union[str, Any]=0.0 , UpperCamelCase : Dict=0.0 , UpperCamelCase : int=0.1 , UpperCamelCase : Union[str, Any]=0.1 , UpperCamelCase : int=0.02 , UpperCamelCase : Union[str, Any]=1e-5 , UpperCamelCase : List[str]="group" , UpperCamelCase : Optional[int]="gelu" , UpperCamelCase : int=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , UpperCamelCase : Tuple=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase : Dict=(10, 3, 3, 3, 3, 2, 2) , UpperCamelCase : Optional[int]=False , UpperCamelCase : Any=1_28 , UpperCamelCase : Dict=16 , UpperCamelCase : int=False , UpperCamelCase : Any=True , UpperCamelCase : Any=0.05 , UpperCamelCase : Dict=10 , UpperCamelCase : int=2 , UpperCamelCase : str=0.0 , UpperCamelCase : Optional[Any]=10 , UpperCamelCase : Optional[Any]=0 , UpperCamelCase : int=3_20 , UpperCamelCase : str=2 , UpperCamelCase : Union[str, Any]=0.1 , UpperCamelCase : Tuple=1_00 , UpperCamelCase : Any=2_56 , UpperCamelCase : int=2_56 , UpperCamelCase : Dict=0.1 , UpperCamelCase : Optional[int]="mean" , UpperCamelCase : int=False , UpperCamelCase : Any=False , UpperCamelCase : str=2_56 , UpperCamelCase : int=80 , UpperCamelCase : Dict=0 , UpperCamelCase : Dict=1 , UpperCamelCase : List[Any]=2 , UpperCamelCase : Optional[Any]=0.5 , **UpperCamelCase : Dict , ):
'''simple docstring'''
super().__init__(**UpperCamelCase , pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase )
_snake_case : Dict = hidden_size
_snake_case : Dict = feat_extract_norm
_snake_case : List[Any] = feat_extract_activation
_snake_case : Optional[int] = list(UpperCamelCase )
_snake_case : Any = list(UpperCamelCase )
_snake_case : Optional[int] = list(UpperCamelCase )
_snake_case : str = conv_bias
_snake_case : Any = num_conv_pos_embeddings
_snake_case : List[str] = num_conv_pos_embedding_groups
_snake_case : List[Any] = len(self.conv_dim )
_snake_case : List[str] = num_hidden_layers
_snake_case : List[str] = intermediate_size
_snake_case : str = hidden_act
_snake_case : int = num_attention_heads
_snake_case : str = hidden_dropout
_snake_case : List[str] = attention_dropout
_snake_case : Tuple = activation_dropout
_snake_case : Optional[Any] = feat_proj_dropout
_snake_case : Optional[Any] = final_dropout
_snake_case : Union[str, Any] = layerdrop
_snake_case : int = layer_norm_eps
_snake_case : int = initializer_range
_snake_case : Any = num_ctc_classes
_snake_case : Optional[int] = vocab_size
_snake_case : Optional[Any] = do_stable_layer_norm
_snake_case : Union[str, Any] = use_weighted_layer_sum
_snake_case : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_snake_case : List[str] = apply_spec_augment
_snake_case : List[str] = mask_time_prob
_snake_case : List[Any] = mask_time_length
_snake_case : Any = mask_time_min_masks
_snake_case : Union[str, Any] = mask_feature_prob
_snake_case : Any = mask_feature_length
_snake_case : Any = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_snake_case : Any = num_codevectors_per_group
_snake_case : Dict = num_codevector_groups
_snake_case : Union[str, Any] = contrastive_logits_temperature
_snake_case : Tuple = feat_quantizer_dropout
_snake_case : Optional[int] = num_negatives
_snake_case : Union[str, Any] = codevector_dim
_snake_case : Tuple = proj_codevector_dim
_snake_case : Union[str, Any] = diversity_loss_weight
# ctc loss
_snake_case : Any = ctc_loss_reduction
_snake_case : int = ctc_zero_infinity
# pretraining loss
_snake_case : Optional[int] = replace_prob
@property
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
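    # Note: with the default conv_stride (5, 2, 2, 2, 2, 2, 2) the product
    # computed above is 5 * 2**6 = 320, i.e. one encoder frame per 320 raw
    # audio samples.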
| 704 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
lowerCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] =VOCAB_FILES_NAMES
a_ : List[str] =PRETRAINED_VOCAB_FILES_MAP
a_ : str =PRETRAINED_INIT_CONFIGURATION
a_ : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : List[Any] =RealmTokenizer
def __init__( self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Optional[Any]="[UNK]" , UpperCamelCase : Any="[SEP]" , UpperCamelCase : Optional[Any]="[PAD]" , UpperCamelCase : Optional[int]="[CLS]" , UpperCamelCase : Optional[Any]="[MASK]" , UpperCamelCase : Dict=True , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
_snake_case : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
_snake_case : int = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
_snake_case : List[str] = do_lower_case
_snake_case : List[Any] = strip_accents
_snake_case : Dict = tokenize_chinese_chars
_snake_case : Any = normalizer_class(**UpperCamelCase )
_snake_case : Optional[int] = do_lower_case
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = PaddingStrategy.MAX_LENGTH
_snake_case : Any = text
_snake_case : List[str] = kwargs.pop('text_pair' , UpperCamelCase )
_snake_case : int = kwargs.pop('return_tensors' , UpperCamelCase )
_snake_case : Optional[int] = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(UpperCamelCase ):
if batch_text_pair is not None:
_snake_case : List[Any] = batch_text_pair[idx]
else:
_snake_case : Optional[Any] = None
_snake_case : Optional[int] = super().__call__(UpperCamelCase , UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
_snake_case : str = encoded_candidates.get('input_ids' )
_snake_case : Tuple = encoded_candidates.get('attention_mask' )
_snake_case : List[str] = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(UpperCamelCase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(UpperCamelCase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(UpperCamelCase )
_snake_case : str = {key: item for key, item in output_data.items() if len(UpperCamelCase ) != 0}
return BatchEncoding(UpperCamelCase , tensor_type=UpperCamelCase )
def UpperCamelCase_ ( self : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any]=None ):
'''simple docstring'''
_snake_case : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : int = [self.sep_token_id]
_snake_case : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : Optional[Any] = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
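# Usage sketch (comments only; the class above is masked as _lowerCAmelCase but
# mirrors RealmTokenizerFast, and the masked candidate-batching method
# corresponds to batch_encode_candidates — both names are assumptions here):
# tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
# batch = tokenizer.batch_encode_candidates(
#     [["Hello world!", "Nice to meet you!"]], max_length=10, return_tensors="pt")
# batch["input_ids"].shape  # (num_questions, num_candidates, max_length)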
| 669 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase_ ( lowerCAmelCase: List[str] )-> Dict:
_snake_case : Tuple = 'huggingface/label-files'
_snake_case : Tuple = 'imagenet-1k-id2label.json'
_snake_case : Dict = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type='dataset' ) , 'r' ) )
_snake_case : List[str] = {int(__snake_case ): v for k, v in idalabel.items()}
_snake_case : Optional[int] = {v: k for k, v in idalabel.items()}
_snake_case : List[str] = 'std_conv' if 'bit' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
_snake_case : Optional[int] = BitConfig(
conv_layer=__snake_case , num_labels=10_00 , idalabel=__snake_case , labelaid=__snake_case , )
return config
def lowerCamelCase_ ( lowerCAmelCase: Tuple )-> Any:
if "stem.conv" in name:
_snake_case : List[str] = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
_snake_case : str = name.replace('blocks' , 'layers' )
if "head.fc" in name:
_snake_case : Any = name.replace('head.fc' , 'classifier.1' )
if name.startswith('norm' ):
_snake_case : List[str] = 'bit.' + name
if "bit" not in name and "classifier" not in name:
_snake_case : Tuple = 'bit.encoder.' + name
return name
def lowerCamelCase_ ( )-> Optional[int]:
_snake_case : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_snake_case : str = Image.open(requests.get(__snake_case , stream=__snake_case ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( lowerCAmelCase: Dict , lowerCAmelCase: str , lowerCAmelCase: List[str]=False )-> Tuple:
_snake_case : List[str] = get_config(__snake_case )
# load original model from timm
_snake_case : List[Any] = create_model(__snake_case , pretrained=__snake_case )
timm_model.eval()
# load state_dict of original model
_snake_case : List[Any] = timm_model.state_dict()
for key in state_dict.copy().keys():
_snake_case : List[str] = state_dict.pop(__snake_case )
_snake_case : Optional[int] = val.squeeze() if 'head' in key else val
# load HuggingFace model
_snake_case : Union[str, Any] = BitForImageClassification(__snake_case )
model.eval()
model.load_state_dict(__snake_case )
# create image processor
_snake_case : Tuple = create_transform(**resolve_data_config({} , model=__snake_case ) )
_snake_case : Tuple = transform.transforms
_snake_case : str = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
_snake_case : Optional[Any] = BitImageProcessor(
do_resize=__snake_case , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__snake_case , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=__snake_case , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
_snake_case : str = prepare_img()
_snake_case : List[Any] = transform(__snake_case ).unsqueeze(0 )
_snake_case : Any = processor(__snake_case , return_tensors='pt' ).pixel_values
# verify pixel values
assert torch.allclose(__snake_case , __snake_case )
# verify logits
with torch.no_grad():
_snake_case : str = model(__snake_case )
_snake_case : List[str] = outputs.logits
print('Logits:' , logits[0, :3] )
print('Predicted class:' , model.config.idalabel[logits.argmax(-1 ).item()] )
_snake_case : Dict = timm_model(__snake_case )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__snake_case , outputs.logits , atol=1E-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(F"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__snake_case )
processor.save_pretrained(__snake_case )
if push_to_hub:
print(F"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(F"""ybelkada/{model_name}""" )
processor.push_to_hub(F"""ybelkada/{model_name}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
lowerCAmelCase_ = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
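# Example invocation (a sketch; the script filename is an assumption):
# python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#     --pytorch_dump_folder_path ./bit-dump --push_to_hub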
| 705 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict , lowerCAmelCase: Union[str, Any] )-> Optional[int]:
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
_snake_case : Tuple = TOKENIZER_CLASSES
else:
_snake_case : Union[str, Any] = {tokenizer_name: getattr(lowerCAmelCase , tokenizer_name + 'Fast' )}
logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
_snake_case : Dict = TOKENIZER_CLASSES[tokenizer_name]
_snake_case : Optional[Any] = True
if checkpoint_name is None:
_snake_case : Union[str, Any] = list(tokenizer_class.max_model_input_sizes.keys() )
else:
_snake_case : Optional[int] = [checkpoint_name]
logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
_snake_case : str = tokenizer_class.from_pretrained(lowerCAmelCase , force_download=lowerCAmelCase )
# Save fast tokenizer
logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
_snake_case , _snake_case : Tuple = checkpoint.split('/' )
_snake_case : int = os.path.join(lowerCAmelCase , lowerCAmelCase )
elif add_prefix:
_snake_case : Dict = checkpoint
_snake_case : Optional[Any] = dump_path
else:
_snake_case : str = None
_snake_case : Union[str, Any] = dump_path
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
_snake_case : Optional[Any] = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
_snake_case : Optional[int] = file_path.split(lowerCAmelCase )[-1][0]
if next_char == "/":
_snake_case : Union[str, Any] = os.path.join(lowerCAmelCase , lowerCAmelCase )
_snake_case : str = None
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
_snake_case : Optional[int] = tokenizer.save_pretrained(
lowerCAmelCase , legacy_format=lowerCAmelCase , filename_prefix=lowerCAmelCase )
logger.info(F"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(lowerCAmelCase )
logger.info(F"""=> removing {file_name}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
lowerCAmelCase_ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
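# Example invocation (a sketch; the script filename and flag values are
# illustrative assumptions):
# python convert_slow_tokenizers_checkpoints_to_fast.py \
#     --tokenizer_name BertTokenizer --checkpoint_name bert-base-uncased \
#     --dump_path ./fast_tokenizers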
| 669 | 0 |
from string import ascii_uppercase
lowerCAmelCase_ = {str(ord(c) - 55): c for c in ascii_uppercase}
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Tuple )-> Any:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise TypeError('int() can\'t convert non-string with explicit base' )
if num < 0:
raise ValueError('parameter must be positive int' )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise TypeError('\'str\' object cannot be interpreted as an integer' )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise TypeError('\'float\' object cannot be interpreted as an integer' )
if base in (0, 1):
raise ValueError('base must be >= 2' )
if base > 36:
raise ValueError('base must be <= 36' )
_snake_case : Any = ''
_snake_case : Union[str, Any] = 0
_snake_case : str = 0
while div != 1:
_snake_case , _snake_case : Any = divmod(lowerCAmelCase__ , lowerCAmelCase__ )
if base >= 11 and 9 < mod < 36:
_snake_case : Dict = ALPHABET_VALUES[str(lowerCAmelCase__ )]
else:
_snake_case : List[str] = str(lowerCAmelCase__ )
new_value += actual_value
_snake_case : Tuple = num // base
_snake_case : List[Any] = div
if div == 0:
return str(new_value[::-1] )
elif div == 1:
new_value += str(lowerCAmelCase__ )
return str(new_value[::-1] )
return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
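    # Worked examples (the converter above is masked as lowerCamelCase_; the
    # name decimal_to_any used in the test loop is taken as the real one):
    # decimal_to_any(255, 16) == "FF" and decimal_to_any(255, 2) == "11111111"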
| 706 |
def lowerCamelCase_ ( lowerCAmelCase: bytes )-> str:
return "".join([hex(lowerCAmelCase )[2:].zfill(2 ).upper() for byte in list(lowerCAmelCase )] )
def lowerCamelCase_ ( lowerCAmelCase: str )-> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(lowerCAmelCase ) % 2) != 0:
raise ValueError(
'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(lowerCAmelCase ) <= set('0123456789ABCDEF' ):
raise ValueError(
'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(lowerCAmelCase ) , 2 ) )
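# Worked example (the two functions above are masked as lowerCamelCase_ but
# implement base16 encoding/decoding per RFC 3548):
# encode: b"Hello" -> "48656C6C6F"    decode: "48656C6C6F" -> b"Hello"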
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 | 0 |
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] = 50_00_00_00 )-> str:
_snake_case : int = set()
_snake_case : List[str] = int((limit - 24) ** (1 / 2) )
_snake_case : Any = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
        primes.difference_update(set(range(p * p , prime_square_limit + 1 , p ) ) )
for primea in primes:
_snake_case : Union[str, Any] = primea * primea
for primea in primes:
_snake_case : Dict = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
_snake_case : int = primea * primea * primea * primea
_snake_case : int = square + cube + tetr
if total >= limit:
break
ret.add(lowerCAmelCase_ )
return len(lowerCAmelCase_ )
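# Each counted value is p**2 + q**3 + r**4 with p, q, r prime (Project
# Euler 87); the smallest such number is 28 = 2**2 + 2**3 + 2**4.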
if __name__ == "__main__":
print(F"""{solution() = }""")
| 707 |
import csv
import tweepy
# Twitter API credentials
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
def lowerCamelCase_ ( lowerCAmelCase: str )-> None:
# authorize twitter, initialize tweepy
_snake_case : Optional[Any] = tweepy.OAuthHandler(lowerCAmelCase , lowerCAmelCase )
auth.set_access_token(lowerCAmelCase , lowerCAmelCase )
_snake_case : List[Any] = tweepy.API(lowerCAmelCase )
# initialize a list to hold all the tweepy Tweets
_snake_case : Any = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_snake_case : List[str] = api.user_timeline(screen_name=lowerCAmelCase , count=2_00 )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# save the id of the oldest tweet less one
_snake_case : List[Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowerCAmelCase ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
_snake_case : Tuple = api.user_timeline(
screen_name=lowerCAmelCase , count=2_00 , max_id=lowerCAmelCase )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# update the id of the oldest tweet less one
_snake_case : List[str] = alltweets[-1].id - 1
print(F"""...{len(lowerCAmelCase )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
_snake_case : int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""" , 'w' ) as f:
_snake_case : Any = csv.writer(lowerCAmelCase )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(lowerCAmelCase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 669 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowerCAmelCase_ = {"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a_ : List[Any] =MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
a_ : Optional[Any] =TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
a_ : Tuple ={config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
a_ : int ={
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : int = pipeline(
task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' )
_snake_case : Optional[Any] = text_classifier('This is great !' )
self.assertEqual(nested_simplify(UpperCamelCase ) , [{'label': 'LABEL_0', 'score': 0.5_04}] )
_snake_case : Any = text_classifier('This is great !' , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase ) , [{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}] )
_snake_case : Any = text_classifier(['This is great !', 'This is bad'] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase ) , [
[{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}],
[{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}],
] , )
_snake_case : int = text_classifier('This is great !' , top_k=1 )
self.assertEqual(nested_simplify(UpperCamelCase ) , [{'label': 'LABEL_0', 'score': 0.5_04}] )
# Legacy behavior
_snake_case : str = text_classifier('This is great !' , return_all_scores=UpperCamelCase )
self.assertEqual(nested_simplify(UpperCamelCase ) , [{'label': 'LABEL_0', 'score': 0.5_04}] )
_snake_case : str = text_classifier('This is great !' , return_all_scores=UpperCamelCase )
self.assertEqual(
nested_simplify(UpperCamelCase ) , [[{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}]] )
_snake_case : Optional[int] = text_classifier(['This is great !', 'Something else'] , return_all_scores=UpperCamelCase )
self.assertEqual(
nested_simplify(UpperCamelCase ) , [
[{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}],
[{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}],
] , )
_snake_case : Any = text_classifier(['This is great !', 'Something else'] , return_all_scores=UpperCamelCase )
self.assertEqual(
nested_simplify(UpperCamelCase ) , [
{'label': 'LABEL_0', 'score': 0.5_04},
{'label': 'LABEL_0', 'score': 0.5_04},
] , )
@require_torch
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
import torch
_snake_case : str = pipeline(
task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' , device=torch.device('cpu' ) , )
_snake_case : List[str] = text_classifier('This is great !' )
self.assertEqual(nested_simplify(UpperCamelCase ) , [{'label': 'LABEL_0', 'score': 0.5_04}] )
@require_tf
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : int = pipeline(
task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='tf' )
_snake_case : str = text_classifier('This is great !' )
self.assertEqual(nested_simplify(UpperCamelCase ) , [{'label': 'LABEL_0', 'score': 0.5_04}] )
@slow
@require_torch
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = pipeline('text-classification' )
_snake_case : List[Any] = text_classifier('This is great !' )
self.assertEqual(nested_simplify(UpperCamelCase ) , [{'label': 'POSITIVE', 'score': 1.0}] )
_snake_case : Any = text_classifier('This is bad !' )
self.assertEqual(nested_simplify(UpperCamelCase ) , [{'label': 'NEGATIVE', 'score': 1.0}] )
_snake_case : Optional[int] = text_classifier('Birds are a type of animal' )
self.assertEqual(nested_simplify(UpperCamelCase ) , [{'label': 'POSITIVE', 'score': 0.9_88}] )
@slow
@require_tf
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : int = pipeline('text-classification' , framework='tf' )
_snake_case : Tuple = text_classifier('This is great !' )
self.assertEqual(nested_simplify(UpperCamelCase ) , [{'label': 'POSITIVE', 'score': 1.0}] )
_snake_case : Union[str, Any] = text_classifier('This is bad !' )
self.assertEqual(nested_simplify(UpperCamelCase ) , [{'label': 'NEGATIVE', 'score': 1.0}] )
_snake_case : List[str] = text_classifier('Birds are a type of animal' )
self.assertEqual(nested_simplify(UpperCamelCase ) , [{'label': 'POSITIVE', 'score': 0.9_88}] )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Dict , UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
_snake_case : str = TextClassificationPipeline(model=UpperCamelCase , tokenizer=UpperCamelCase )
return text_classifier, ["HuggingFace is in", "This is another test"]
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Tuple , UpperCamelCase : Tuple ):
'''simple docstring'''
_snake_case : Optional[Any] = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
_snake_case : int = 'HuggingFace is in'
_snake_case : str = text_classifier(UpperCamelCase )
self.assertEqual(nested_simplify(UpperCamelCase ) , [{'label': ANY(UpperCamelCase ), 'score': ANY(UpperCamelCase )}] )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
_snake_case : int = ['HuggingFace is in ', 'Paris is in France']
_snake_case : Union[str, Any] = text_classifier(UpperCamelCase )
self.assertEqual(
nested_simplify(UpperCamelCase ) , [{'label': ANY(UpperCamelCase ), 'score': ANY(UpperCamelCase )}, {'label': ANY(UpperCamelCase ), 'score': ANY(UpperCamelCase )}] , )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
self.assertTrue(outputs[1]['label'] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
_snake_case : List[str] = text_classifier(UpperCamelCase , top_k=UpperCamelCase )
_snake_case : Any = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(UpperCamelCase ) , [[{'label': ANY(UpperCamelCase ), 'score': ANY(UpperCamelCase )}] * N, [{'label': ANY(UpperCamelCase ), 'score': ANY(UpperCamelCase )}] * N] , )
_snake_case : List[Any] = {'text': 'HuggingFace is in ', 'text_pair': 'Paris is in France'}
_snake_case : Optional[Any] = text_classifier(UpperCamelCase )
self.assertEqual(
nested_simplify(UpperCamelCase ) , {'label': ANY(UpperCamelCase ), 'score': ANY(UpperCamelCase )} , )
self.assertTrue(outputs['label'] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
_snake_case : List[Any] = [['HuggingFace is in ', 'Paris is in France']]
with self.assertRaises(UpperCamelCase ):
text_classifier(UpperCamelCase )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
_snake_case : str = text_classifier([[['HuggingFace is in ', 'Paris is in France']]] )
self.assertEqual(
nested_simplify(UpperCamelCase ) , [{'label': ANY(UpperCamelCase ), 'score': ANY(UpperCamelCase )}] , )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
| 708 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : Optional[Union[str, Path]] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : bool =True
a_ : Optional[int] =None
a_ : int =1
a_ : Optional[Union[str, bool]] =None
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(UpperCamelCase ) for k, v in self.__dict__.items()} )
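# Usage note (the field names above are masked, so this is an inference from
# the method body): the copy method returns a new instance of the same
# dataclass with every field deep-copied, so mutable values such as dicts and
# Paths are never shared between the original and the copy.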
| 669 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
'convert_funnel_original_tf_checkpoint_to_pytorch': [],
'tokenization_funnel': ['FunnelTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['FunnelTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
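# Design note: with this lazy layout, importing a symbol such as FunnelModel
# resolves the torch-backed module only on first attribute access, so merely
# importing the package pulls in neither PyTorch nor TensorFlow.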
| 709 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
lowerCAmelCase_ = ["""gpt2"""]
lowerCAmelCase_ = """gpt2"""
if is_tf_available():
class _lowerCAmelCase ( tf.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase : Dict ):
'''simple docstring'''
super().__init__()
_snake_case : Optional[int] = tokenizer
_snake_case : Union[str, Any] = AutoConfig.from_pretrained(UpperCamelCase )
_snake_case : int = TFGPTaLMHeadModel.from_config(UpperCamelCase )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_snake_case : Dict = self.tokenizer(UpperCamelCase )
_snake_case : Union[str, Any] = tokenized['input_ids'].to_tensor()
_snake_case : Any = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
_snake_case : Tuple = self.model(input_ids=UpperCamelCase , attention_mask=UpperCamelCase )['logits']
return outputs
@require_tf
@require_keras_nlp
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
super().setUp()
_snake_case : Optional[int] = [GPTaTokenizer.from_pretrained(UpperCamelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
_snake_case : Tuple = [TFGPTaTokenizer.from_pretrained(UpperCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
_snake_case : Any = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
_snake_case : Tuple = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
_snake_case : Optional[int] = tokenizer([test_inputs] , return_tensors='tf' )
_snake_case : Tuple = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
_snake_case : Dict = python_outputs[key].numpy()
_snake_case : Optional[Any] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(UpperCamelCase , tf.intaa ) == tf_outputs_values ) )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case : str = tf.function(UpperCamelCase )
for test_inputs in self.test_sentences:
_snake_case : int = tf.constant(UpperCamelCase )
_snake_case : Tuple = compiled_tokenizer(UpperCamelCase )
_snake_case : int = tf_tokenizer(UpperCamelCase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case : Union[str, Any] = ModelToSave(tokenizer=UpperCamelCase )
_snake_case : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case : Tuple = model.serving(UpperCamelCase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_snake_case : str = Path(UpperCamelCase ) / 'saved.model'
tf.saved_model.save(UpperCamelCase , UpperCamelCase , signatures={'serving_default': model.serving} )
_snake_case : Optional[int] = tf.saved_model.load(UpperCamelCase )
_snake_case : List[str] = loaded_model.signatures['serving_default'](UpperCamelCase )['output_0']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case : Any = tf_tokenizer(UpperCamelCase ) # Build model with some sample inputs
_snake_case : Optional[Any] = tf_tokenizer.get_config()
_snake_case : Tuple = TFGPTaTokenizer.from_config(UpperCamelCase )
_snake_case : Optional[Any] = model_from_config(UpperCamelCase )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
_snake_case : Union[str, Any] = 12_31_23
for max_length in [3, 5, 10_24]:
_snake_case : Union[str, Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case : List[str] = tf_tokenizer(UpperCamelCase , max_length=UpperCamelCase )
_snake_case : int = out['input_ids'].numpy().shape[1]
assert out_length == max_length
| 669 | 0 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but uses more memory.
lowerCAmelCase_ = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCAmelCase_ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCAmelCase_ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: List[Any] )-> str:
_snake_case : int = len([g for position, g in enumerate(A_ ) if g == main_target[position]] )
return (item, float(A_ ))
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: List[Any] )-> Dict:
_snake_case : Tuple = random.randint(0 , len(A_ ) - 1 )
_snake_case : Dict = parent_a[:random_slice] + parent_a[random_slice:]
_snake_case : str = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def lowerCamelCase_ ( lowerCAmelCase: Dict , lowerCAmelCase: Any )-> List[Any]:
_snake_case : Union[str, Any] = list(A_ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
_snake_case : Tuple = random.choice(A_ )
return "".join(A_ )
def lowerCamelCase_ ( lowerCAmelCase: str , lowerCAmelCase: List[str] , lowerCAmelCase: Optional[int] , )-> int:
_snake_case : int = []
# Generate more children proportionally to the fitness score.
_snake_case : List[str] = int(parent_a[1] * 1_00 ) + 1
_snake_case : Dict = 10 if child_n >= 10 else child_n
for _ in range(A_ ):
_snake_case : Union[str, Any] = population_score[random.randint(0 , A_ )][0]
_snake_case , _snake_case : Union[str, Any] = crossover(parent_a[0] , A_ )
# Append new string to the population list.
pop.append(mutate(A_ , A_ ) )
pop.append(mutate(A_ , A_ ) )
return pop
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Union[str, Any] = True )-> int:
if N_POPULATION < N_SELECTED:
_snake_case : List[Any] = F"""{N_POPULATION} must be bigger than {N_SELECTED}"""
raise ValueError(A_ )
# Verify that the target contains no genes besides the ones inside genes variable.
_snake_case : Union[str, Any] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_snake_case : Union[str, Any] = F"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
raise ValueError(A_ )
# Generate random starting population.
_snake_case : Optional[int] = []
for _ in range(A_ ):
population.append(''.join([random.choice(A_ ) for i in range(len(A_ ) )] ) )
# Just some logs to know what the algorithms is doing.
_snake_case , _snake_case : Optional[int] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(A_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_snake_case : str = [evaluate(A_ , A_ ) for item in population]
# Check if there is a matching evolution.
        _snake_case : List[Any] = sorted(A_ , key=lambda x : x[1] , reverse=A_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"""\nGeneration: {generation}"""
F"""\nTotal Population:{total_population}"""
F"""\nBest score: {population_score[0][1]}"""
F"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
_snake_case : List[str] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(A_ )
# Normalize population score to be between 0 and 1.
_snake_case : List[Any] = [
(item, score / len(A_ )) for item, score in population_score
]
# This is selection
for i in range(A_ ):
population.extend(select(population_score[int(A_ )] , A_ , A_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(A_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCAmelCase_ = (
'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
)
lowerCAmelCase_ = list(
""" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"""
"""nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\"""
)
lowerCAmelCase_ = basic(target_str, genes_list)
print(
F"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
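# Hedged side note (not from the original source): a minimal sketch of the
# child-count rule applied in `select` above -- a parent's normalized score is
# scaled by 100 and capped, so no parent produces more than 10 children.
# The name `child_count` is illustrative.
def child_count(normalized_score: float) -> int:
    n = int(normalized_score * 100) + 1
    return 10 if n >= 10 else n

assert child_count(0.02) == 3   # weak parents still get a few children
assert child_count(0.50) == 10  # strong parents are capped at 10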
| 710 |
def lowerCamelCase_ ( lowerCAmelCase: int )-> list:
_snake_case : List[Any] = int(lowerCAmelCase )
if n_element < 1:
        _snake_case : int = ValueError('n_element should be a positive number' )
raise my_error
_snake_case : Union[str, Any] = [1]
_snake_case , _snake_case , _snake_case : Any = (0, 0, 0)
_snake_case : str = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
lowerCAmelCase_ = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
lowerCAmelCase_ = hamming(int(n))
print("""-----------------------------------------------------""")
print(F"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 669 | 0 |
import heapq as hq
import math
from collections.abc import Iterator
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase : Tuple ):
'''simple docstring'''
_snake_case : Tuple = str(id_ )
_snake_case : Optional[int] = None
_snake_case : List[str] = None
_snake_case : int = []
_snake_case : int = {} # {vertex:distance}
def __lt__( self : Optional[int] , UpperCamelCase : Dict ):
'''simple docstring'''
return self.key < other.key
def __repr__( self : str ):
'''simple docstring'''
return self.id
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Optional[int] ):
'''simple docstring'''
self.neighbors.append(_A )
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Any , UpperCamelCase : Any ):
'''simple docstring'''
_snake_case : str = weight
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Optional[int] , lowerCAmelCase: List[Any] )-> str:
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , lowerCAmelCase )
graph[b - 1].add_edge(graph[a - 1] , lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: int )-> list:
_snake_case : Optional[Any] = []
for u in graph:
_snake_case : str = math.inf
_snake_case : int = None
_snake_case : Tuple = 0
_snake_case : Union[str, Any] = graph[:]
while q:
_snake_case : Tuple = min(lowerCAmelCase )
q.remove(lowerCAmelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
_snake_case : Dict = u
_snake_case : List[Any] = u.edges[v.id]
for i in range(1 , len(lowerCAmelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def lowerCamelCase_ ( lowerCAmelCase: Dict , lowerCAmelCase: Optional[Any] )-> Iterator[tuple]:
for u in graph:
_snake_case : Any = math.inf
_snake_case : Any = None
_snake_case : Optional[int] = 0
_snake_case : Any = list(lowerCAmelCase )
hq.heapify(lowerCAmelCase )
while h:
_snake_case : Optional[Any] = hq.heappop(lowerCAmelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
_snake_case : Optional[int] = u
_snake_case : Dict = u.edges[v.id]
hq.heapify(lowerCAmelCase )
for i in range(1 , len(lowerCAmelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def lowerCamelCase_ ( )-> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
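# Hedged usage sketch. The dump gives the class and all three module functions
# obfuscated names, so this assumes the original bindings: `Vertex` for the
# class, `connect` for the edge helper and `prim` for the list-based variant.
graph = [Vertex(n) for n in range(3)]  # vertices with ids "0", "1", "2"
connect(graph, 1, 2, 1)                # vertex 1 -- vertex 2, weight 1
connect(graph, 2, 3, 2)                # vertex 2 -- vertex 3, weight 2
print(prim(graph, graph[0]))           # [(2, 1), (3, 2)]: (vertex, parent) pairs, 1-indexed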
| 711 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Tuple="shi-labs/oneformer_demo" )-> Any:
with open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type='dataset' ) , 'r' ) as f:
_snake_case : str = json.load(lowerCAmelCase )
_snake_case : List[str] = {}
_snake_case : Optional[Any] = []
_snake_case : Optional[Any] = []
for key, info in class_info.items():
_snake_case : Optional[int] = info['name']
class_names.append(info['name'] )
if info["isthing"]:
thing_ids.append(int(lowerCAmelCase ) )
_snake_case : List[str] = thing_ids
_snake_case : Optional[Any] = class_names
return metadata
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any=7 , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : Dict=30 , UpperCamelCase : int=4_00 , UpperCamelCase : List[str]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : str=True , UpperCamelCase : Any=[0.5, 0.5, 0.5] , UpperCamelCase : int=[0.5, 0.5, 0.5] , UpperCamelCase : Dict=10 , UpperCamelCase : Dict=False , UpperCamelCase : Dict=2_55 , UpperCamelCase : Dict="shi-labs/oneformer_demo" , UpperCamelCase : Optional[int]="ade20k_panoptic.json" , UpperCamelCase : Tuple=10 , ):
'''simple docstring'''
_snake_case : Optional[Any] = parent
_snake_case : Union[str, Any] = batch_size
_snake_case : Tuple = num_channels
_snake_case : List[str] = min_resolution
_snake_case : List[str] = max_resolution
_snake_case : Optional[Any] = do_resize
_snake_case : Optional[Any] = {'shortest_edge': 32, 'longest_edge': 13_33} if size is None else size
_snake_case : Optional[int] = do_normalize
_snake_case : Any = image_mean
_snake_case : List[Any] = image_std
_snake_case : Any = class_info_file
_snake_case : List[str] = prepare_metadata(UpperCamelCase , UpperCamelCase )
_snake_case : Any = num_text
_snake_case : str = repo_path
# for the post_process_functions
_snake_case : Optional[Any] = 2
_snake_case : str = 10
_snake_case : Union[str, Any] = 10
_snake_case : List[Any] = 3
_snake_case : str = 4
_snake_case : List[Any] = num_labels
_snake_case : str = do_reduce_labels
_snake_case : List[str] = ignore_index
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any]=False ):
'''simple docstring'''
if not batched:
_snake_case : Any = image_inputs[0]
if isinstance(UpperCamelCase , Image.Image ):
_snake_case , _snake_case : Any = image.size
else:
_snake_case , _snake_case : Any = image.shape[1], image.shape[2]
if w < h:
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * h / w )
_snake_case : Any = self.size['shortest_edge']
elif w > h:
_snake_case : int = self.size['shortest_edge']
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * w / h )
else:
_snake_case : Dict = self.size['shortest_edge']
_snake_case : Dict = self.size['shortest_edge']
else:
_snake_case : List[Any] = []
for image in image_inputs:
_snake_case , _snake_case : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_snake_case : List[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0]
_snake_case : Optional[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1]
return expected_height, expected_width
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Tuple =OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
a_ : Any =image_processing_class
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = OneFormerImageProcessorTester(self )
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(UpperCamelCase , 'image_std' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'size' ) )
self.assertTrue(hasattr(UpperCamelCase , 'ignore_index' ) )
self.assertTrue(hasattr(UpperCamelCase , 'class_info_file' ) )
self.assertTrue(hasattr(UpperCamelCase , 'num_text' ) )
self.assertTrue(hasattr(UpperCamelCase , 'repo_path' ) )
self.assertTrue(hasattr(UpperCamelCase , 'metadata' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_reduce_labels' ) )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
_snake_case : Optional[Any] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : List[Any] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : int = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
_snake_case : int = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Union[str, Any] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : Optional[int] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
_snake_case : Optional[int] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : List[str] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Tuple=False , UpperCamelCase : str=False , UpperCamelCase : Dict="np" ):
'''simple docstring'''
_snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_snake_case : List[str] = self.image_processing_tester.num_labels
_snake_case : Optional[int] = None
_snake_case : str = None
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
if with_segmentation_maps:
_snake_case : Optional[int] = num_labels
if is_instance_map:
_snake_case : Union[str, Any] = list(range(UpperCamelCase ) ) * 2
_snake_case : Tuple = dict(enumerate(UpperCamelCase ) )
_snake_case : Union[str, Any] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
_snake_case : int = [Image.fromarray(UpperCamelCase ) for annotation in annotations]
_snake_case : List[Any] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , UpperCamelCase , return_tensors='pt' , instance_id_to_semantic_id=UpperCamelCase , pad_and_return_pixel_mask=UpperCamelCase , )
return inputs
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
def common(UpperCamelCase : Any=False , UpperCamelCase : int=None ):
_snake_case : Any = self.comm_get_image_processor_inputs(
with_segmentation_maps=UpperCamelCase , is_instance_map=UpperCamelCase , segmentation_type=UpperCamelCase )
_snake_case : Union[str, Any] = inputs['mask_labels']
_snake_case : Optional[int] = inputs['class_labels']
_snake_case : Optional[int] = inputs['pixel_values']
_snake_case : Optional[Any] = inputs['text_inputs']
# check the batch_size
for mask_label, class_label, text_input in zip(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
                # this ensures padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(UpperCamelCase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=UpperCamelCase )
common(is_instance_map=UpperCamelCase , segmentation_type='pil' )
common(is_instance_map=UpperCamelCase , segmentation_type='pil' )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = np.zeros((20, 50) )
_snake_case : int = 1
_snake_case : int = 1
_snake_case : Optional[Any] = 1
_snake_case : List[Any] = binary_mask_to_rle(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
        _snake_case : Any = feature_extractor.post_process_semantic_segmentation(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
_snake_case : Optional[Any] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        _snake_case : Union[str, Any] = feature_extractor.post_process_semantic_segmentation(UpperCamelCase , target_sizes=UpperCamelCase )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Any = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : int = image_processor.post_process_instance_segmentation(UpperCamelCase , threshold=0 )
self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : str = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : Any = image_processor.post_process_panoptic_segmentation(UpperCamelCase , threshold=0 )
self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
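# Hedged sketch of the encoding exercised by the RLE test above: runs of ones
# in the flattened mask are reported as (1-indexed start, length) pairs.
# `binary_mask_to_rle_sketch` is an illustrative name, not the library symbol,
# and the implementation may differ in detail from the real one.
def binary_mask_to_rle_sketch(mask):
    pixels = np.concatenate([[0], mask.flatten(), [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1  # 1-indexed change points
    runs[1::2] -= runs[::2]                            # (start, stop) -> (start, length)
    return list(runs)

demo_mask = np.zeros((4, 4), dtype=np.uint8)
demo_mask[0, 2:4] = 1
assert binary_mask_to_rle_sketch(demo_mask) == [3, 2]  # ones start at pixel 3, run of 2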
| 669 | 0 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """emoji_file""": """emoji.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt""",
},
"""emoji_file""": {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json""",
},
}
lowerCAmelCase_ = {
"""abeja/gpt-neox-japanese-2.7b""": 2048,
}
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: List[str] )-> str:
with open(__snake_case , 'r' , encoding='utf-8' ) as f:
_snake_case : int = json.loads(f.read() )
_snake_case : Optional[int] = collections.OrderedDict()
_snake_case : Optional[int] = collections.OrderedDict()
_snake_case : Dict = collections.OrderedDict()
with open(__snake_case , 'r' , encoding='utf-8' ) as f:
_snake_case : Optional[Any] = f.readlines()
_snake_case : Optional[int] = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
for idx, b in enumerate(__snake_case ):
_snake_case : List[Any] = b
_snake_case : Tuple = idx
for wd in b:
_snake_case : int = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[str] =VOCAB_FILES_NAMES
a_ : Any =PRETRAINED_VOCAB_FILES_MAP
a_ : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Optional[int] =["""input_ids""", """attention_mask"""]
def __init__( self : Optional[int] , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : Union[str, Any]="<|endoftext|>" , UpperCamelCase : str="<|endoftext|>" , UpperCamelCase : Tuple="<|startoftext|>" , UpperCamelCase : List[str]="<|endoftext|>" , UpperCamelCase : Union[str, Any]=False , **UpperCamelCase : Any , ):
'''simple docstring'''
super().__init__(
unk_token=__a , pad_token=__a , bos_token=__a , eos_token=__a , do_clean_text=__a , **__a , )
if not os.path.isfile(__a ):
raise ValueError(
f"""Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained"""
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
if not os.path.isfile(__a ):
raise ValueError(
f"""Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google"""
' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
_snake_case : Dict = do_clean_text
_snake_case , _snake_case , _snake_case , _snake_case : List[str] = load_vocab_and_emoji(__a , __a )
_snake_case : Optional[int] = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return len(self.raw_vocab )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder )
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : Any ):
'''simple docstring'''
return self.subword_tokenizer.tokenize(__a , clean=self.do_clean_text )
def UpperCamelCase_ ( self : str , UpperCamelCase : Dict ):
'''simple docstring'''
return self.vocab.get(__a , self.vocab.get(self.unk_token ) )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Dict ):
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(__a )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_snake_case : int = ''.join(__a ).strip()
return out_string
def UpperCamelCase_ ( self : int , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__a , add_special_tokens=__a ) + [self.eos_token_id] )
if len(__a ) > self.model_max_length:
_snake_case : Optional[int] = input_ids[-self.model_max_length :]
return input_ids
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : int , UpperCamelCase : Any = None ):
'''simple docstring'''
_snake_case : Union[str, Any] = 0
if os.path.isdir(__a ):
_snake_case : str = os.path.join(
__a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_snake_case : int = os.path.join(
__a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
else:
_snake_case : Dict = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
_snake_case : Union[str, Any] = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(__a , 'w' , encoding='utf-8' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
' Please check that the vocabulary is not corrupted!' )
_snake_case : Tuple = token_index
writer.write(','.join(__a ) + '\n' )
index += 1
with open(__a , 'w' , encoding='utf-8' ) as writer:
json.dump(self.emoji , __a )
return vocab_file, emoji_file
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[str] = vocab # same as swe
_snake_case : Optional[int] = ids_to_tokens # same as bpe
_snake_case : Any = emoji
_snake_case : Tuple = np.max([len(__a ) for w in self.vocab.keys()] )
_snake_case : Optional[int] = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
_snake_case : List[str] = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
_snake_case : List[str] = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
_snake_case : Union[str, Any] = re.compile(
R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
_snake_case : List[str] = re.compile(
R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
_snake_case : int = re.compile(
R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
_snake_case : Optional[Any] = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
_snake_case : Optional[Any] = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
_snake_case : List[str] = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self : Dict ):
'''simple docstring'''
return len(self.ids_to_tokens )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
_snake_case : Tuple = self.content_repattera.sub('<URL>' , __a )
_snake_case : Union[str, Any] = self.content_repattera.sub('<EMAIL>' , __a )
_snake_case : Any = self.content_repattera.sub('<TEL>' , __a )
_snake_case : Optional[int] = self.content_repattera.sub('<DATE>' , __a )
_snake_case : Any = self.content_repattera.sub('<DATE>' , __a )
_snake_case : Tuple = self.content_repattera.sub('<PRICE>' , __a )
_snake_case : Optional[Any] = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
_snake_case : Union[str, Any] = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' )
return content
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any]=False ):
'''simple docstring'''
_snake_case : List[str] = text.replace(' ' , '<SP>' )
_snake_case : Optional[int] = text.replace(' ' , '<SP>' )
_snake_case : List[Any] = text.replace('\r\n' , '<BR>' )
_snake_case : List[Any] = text.replace('\n' , '<BR>' )
_snake_case : Optional[Any] = text.replace('\r' , '<BR>' )
_snake_case : Dict = text.replace('\t' , '<TAB>' )
_snake_case : Optional[Any] = text.replace('—' , 'ー' )
_snake_case : List[Any] = text.replace('−' , 'ー' )
for k, v in self.emoji["emoji"].items():
if k in text:
_snake_case : Tuple = text.replace(__a , __a )
if clean:
_snake_case : Tuple = self.clean_text(__a )
def check_simbol(UpperCamelCase : List[Any] ):
_snake_case : Union[str, Any] = x.encode()
            if len(x ) == 1 and len(e ) == 2:
_snake_case : Union[str, Any] = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xc2a1 and c <= 0xc2bf)
or (c >= 0xc780 and c <= 0xc783)
or (c >= 0xcab9 and c <= 0xcbbf)
or (c >= 0xcc80 and c <= 0xcda2)
):
return True
return False
def checkuae(UpperCamelCase : Tuple ):
_snake_case : Union[str, Any] = x.encode()
            if len(x ) == 1 and len(e ) == 3:
_snake_case : Tuple = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xe28080 and c <= 0xe2b07f:
return True
return False
_snake_case : int = 0
_snake_case : Optional[int] = []
while pos < len(__a ):
_snake_case : List[Any] = min(len(__a ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
_snake_case : Tuple = [] # (token_id, token, pos)
for e in range(__a , __a , -1 ):
_snake_case : List[Any] = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(__a ) > 2:
_snake_case : Optional[Any] = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(__a ) > 0:
# the smallest token_id is adopted
_snake_case , _snake_case , _snake_case : List[str] = sorted(__a , key=lambda UpperCamelCase : x[0] )[0]
result.append(__a )
_snake_case : str = e
else:
_snake_case : Dict = pos + 1
_snake_case : Dict = text[pos:end]
if check_simbol(__a ):
result.append('<KIGOU>' )
elif checkuae(__a ):
result.append('<U2000U2BFF>' )
else:
for i in wd.encode('utf-8' ):
result.append('<|byte%d|>' % i )
_snake_case : Any = end
return result
def UpperCamelCase_ ( self : str , UpperCamelCase : int , UpperCamelCase : Union[str, Any]="\n" ):
'''simple docstring'''
_snake_case : int = []
_snake_case : int = []
_snake_case : str = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(__a ) > 0:
words.append(bytearray(__a ).decode('utf-8' , errors='replace' ) )
_snake_case : str = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(__a )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
words.append(__a )
if len(__a ) > 0:
words.append(bytearray(__a ).decode('utf-8' , errors='replace' ) )
_snake_case : Any = ''.join(__a )
return text
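# Hedged standalone sketch of the byte fallback used above: characters missing
# from the vocabulary are emitted as one "<|byte%d|>" token per UTF-8 byte and
# rebuilt with a bytearray on decode. The helper names are illustrative.
def encode_bytes(ch: str) -> list:
    return ["<|byte%d|>" % b for b in ch.encode("utf-8")]

def decode_bytes(tokens: list) -> str:
    return bytearray(int(t[6:-2]) for t in tokens).decode("utf-8", errors="replace")

tokens = encode_bytes("猫")  # ['<|byte231|>', '<|byte140|>', '<|byte171|>']
assert decode_bytes(tokens) == "猫"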
| 712 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCAmelCase_ = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def lowerCamelCase_ ( )-> Tuple:
_snake_case : int = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_snake_case : int = get_sagemaker_input()
else:
_snake_case : Any = get_cluster_input()
return config
def lowerCamelCase_ ( lowerCAmelCase: str=None )-> Any:
if subparsers is not None:
_snake_case : List[Any] = subparsers.add_parser('config' , description=lowerCAmelCase )
else:
_snake_case : Dict = argparse.ArgumentParser('Accelerate config command' , description=lowerCAmelCase )
parser.add_argument(
'--config_file' , default=lowerCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase )
return parser
def lowerCamelCase_ ( lowerCAmelCase: Any )-> Any:
_snake_case : Dict = get_user_input()
if args.config_file is not None:
_snake_case : List[str] = args.config_file
else:
if not os.path.isdir(lowerCAmelCase ):
os.makedirs(lowerCAmelCase )
_snake_case : Union[str, Any] = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(lowerCAmelCase )
else:
config.to_yaml_file(lowerCAmelCase )
print(F"""accelerate configuration saved at {config_file}""" )
def lowerCamelCase_ ( )-> Dict:
_snake_case : List[str] = config_command_parser()
_snake_case : str = parser.parse_args()
config_command(lowerCAmelCase )
if __name__ == "__main__":
main()
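# Hedged usage sketch, assuming the original bindings `config_command_parser`
# and `config_command` for the functions above (the dump collapses their
# names). The config-file path is illustrative; calling `args.func(args)`
# would start the interactive prompts.
parent = argparse.ArgumentParser("accelerate")
subcommands = parent.add_subparsers()
config_command_parser(subparsers=subcommands)  # registers the `config` subcommand
args = parent.parse_args(["config", "--config_file", "my_config.yaml"])
# args.func(args)  # would run config_command(args) and save my_config.yaml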
| 669 | 0 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class _lowerCAmelCase :
'''simple docstring'''
@staticmethod
def UpperCamelCase_ ( *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
pass
def lowerCamelCase_ ( lowerCAmelCase: List[str] )-> Any:
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
lowerCAmelCase_ = (
"""https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
@is_pipeline_test
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a_ : List[Any] =MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : Any ):
'''simple docstring'''
_snake_case : Tuple = pipeline(
'document-question-answering' , model=lowerCamelCase_ , tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
_snake_case : List[str] = INVOICE_URL
_snake_case : Tuple = list(zip(*apply_tesseract(load_image(lowerCamelCase_ ) , lowerCamelCase_ , '' ) ) )
_snake_case : List[str] = """What is the placebo?"""
_snake_case : Dict = [
{
"""image""": load_image(lowerCamelCase_ ),
"""question""": question,
},
{
"""image""": image,
"""question""": question,
},
{
"""image""": image,
"""question""": question,
"""word_boxes""": word_boxes,
},
]
return dqa_pipeline, examples
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] ):
'''simple docstring'''
_snake_case : List[str] = dqa_pipeline(lowerCamelCase_ , top_k=2 )
self.assertEqual(
lowerCamelCase_ , [
[
{'score': ANY(lowerCamelCase_ ), 'answer': ANY(lowerCamelCase_ ), 'start': ANY(lowerCamelCase_ ), 'end': ANY(lowerCamelCase_ )},
{'score': ANY(lowerCamelCase_ ), 'answer': ANY(lowerCamelCase_ ), 'start': ANY(lowerCamelCase_ ), 'end': ANY(lowerCamelCase_ )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Union[str, Any] = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2' )
_snake_case : Optional[int] = INVOICE_URL
_snake_case : List[Any] = """How many cats are there?"""
_snake_case : Union[str, Any] = [
{"""score""": 0.00_01, """answer""": """oy 2312/2019""", """start""": 38, """end""": 39},
{"""score""": 0.00_01, """answer""": """oy 2312/2019 DUE""", """start""": 38, """end""": 40},
]
_snake_case : Any = dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(nested_simplify(lowerCamelCase_ , decimals=4 ) , lowerCamelCase_ )
_snake_case : Optional[Any] = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(nested_simplify(lowerCamelCase_ , decimals=4 ) , lowerCamelCase_ )
        # No text is detected in this image, so layoutlmv2 should fail
        # and probably return an empty answer.
_snake_case : str = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
_snake_case : Optional[Any] = dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(lowerCamelCase_ , [] )
        # We can optionally pass the words and bounding boxes directly
_snake_case : Optional[int] = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
_snake_case : str = []
_snake_case : List[str] = []
_snake_case : Optional[Any] = dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , words=lowerCamelCase_ , boxes=lowerCamelCase_ , top_k=2 )
self.assertEqual(lowerCamelCase_ , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Optional[int] = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , )
_snake_case : List[str] = INVOICE_URL
_snake_case : Optional[Any] = """What is the invoice number?"""
_snake_case : Tuple = dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'score': 0.99_44, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.00_09, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
_snake_case : Tuple = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'score': 0.99_44, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.00_09, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
_snake_case : int = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
[
{'score': 0.99_44, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.00_09, 'answer': 'us-001', 'start': 16, 'end': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Union[str, Any] = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=50 , )
_snake_case : List[Any] = INVOICE_URL
_snake_case : Union[str, Any] = """What is the invoice number?"""
_snake_case : Optional[int] = dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'score': 0.99_74, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.99_48, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
_snake_case : Union[str, Any] = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'score': 0.99_74, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.99_48, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
_snake_case : Union[str, Any] = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
[
{'score': 0.99_74, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.99_48, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : int = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=lowerCamelCase_ )
_snake_case : List[str] = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=lowerCamelCase_ , revision='3dc6de3' , )
_snake_case : Optional[Any] = INVOICE_URL
_snake_case : int = """What is the invoice number?"""
_snake_case : int = dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'score': 0.42_51, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.08_19, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
_snake_case : Any = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'score': 0.42_51, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.08_19, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
_snake_case : int = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
[
{'score': 0.42_51, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.08_19, 'answer': '1110212019', 'start': 23, 'end': 23},
]
]
* 2 , )
_snake_case : Dict = list(zip(*apply_tesseract(load_image(lowerCamelCase_ ) , lowerCamelCase_ , '' ) ) )
# This model should also work if `image` is set to None
_snake_case : Union[str, Any] = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'score': 0.42_51, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.08_19, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : List[str] = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=lowerCamelCase_ )
_snake_case : Optional[Any] = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=lowerCamelCase_ , revision='3dc6de3' , max_seq_len=50 , )
_snake_case : int = INVOICE_URL
_snake_case : Tuple = """What is the invoice number?"""
_snake_case : Dict = dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'score': 0.99_99, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.99_98, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
_snake_case : str = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
[
{'score': 0.99_99, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.99_98, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 , )
_snake_case : Dict = list(zip(*apply_tesseract(load_image(lowerCamelCase_ ) , lowerCamelCase_ , '' ) ) )
# This model should also work if `image` is set to None
_snake_case : int = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{'score': 0.99_99, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.99_98, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
@slow
@require_torch
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Any = pipeline(
'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , )
_snake_case : List[str] = INVOICE_URL
_snake_case : int = """What is the invoice number?"""
_snake_case : Optional[Any] = dqa_pipeline(image=lowerCamelCase_ , question=lowerCamelCase_ , top_k=2 )
self.assertEqual(nested_simplify(lowerCamelCase_ , decimals=4 ) , [{'answer': 'us-001'}] )
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
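# Hedged usage sketch of the pipeline exercised above. The model id is the one
# used in the slow tests; treat the exact scores as indicative only.
dqa_pipeline = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
outputs = dqa_pipeline(image=INVOICE_URL, question="What is the invoice number?", top_k=2)
# e.g. [{"score": ..., "answer": "us-001", "start": 16, "end": 16}, ...]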
| 713 |
# Function to print upper half of diamond (pyramid)
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] )-> List[str]:
for i in range(0 , lowerCAmelCase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] )-> List[Any]:
for i in range(lowerCAmelCase , 0 , -1 ):
for _ in range(lowerCAmelCase , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def lowerCamelCase_ ( lowerCAmelCase: Tuple )-> int:
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(lowerCAmelCase ) # upper half
reverse_floyd(lowerCAmelCase ) # lower half
if __name__ == "__main__":
print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
lowerCAmelCase_ = 1
while K:
lowerCAmelCase_ = int(input("""enter the number and , and see the magic : """))
print()
pretty_print(user_number)
lowerCAmelCase_ = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 669 | 0 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowerCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class _lowerCAmelCase ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase : CLIPSegForImageSegmentation , UpperCamelCase : CLIPSegProcessor , UpperCamelCase : AutoencoderKL , UpperCamelCase : CLIPTextModel , UpperCamelCase : CLIPTokenizer , UpperCamelCase : UNetaDConditionModel , UpperCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCamelCase : StableDiffusionSafetyChecker , UpperCamelCase : CLIPImageProcessor , ):
'''simple docstring'''
super().__init__()
if hasattr(scheduler.config , 'steps_offset' ) and scheduler.config.steps_offset != 1:
_snake_case : str = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
                'to update the config accordingly as leaving `steps_offset` might lead to incorrect results'
' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'
' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'
' file'
)
deprecate('steps_offset!=1' , '1.0.0' , UpperCamelCase_ , standard_warn=UpperCamelCase_ )
_snake_case : int = dict(scheduler.config )
_snake_case : Dict = 1
_snake_case : Union[str, Any] = FrozenDict(UpperCamelCase_ )
if hasattr(scheduler.config , 'skip_prk_steps' ) and scheduler.config.skip_prk_steps is False:
_snake_case : Tuple = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'
' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'
' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'
' Hub, it would be very nice if you could open a Pull request for the'
' `scheduler/scheduler_config.json` file'
)
deprecate('skip_prk_steps not set' , '1.0.0' , UpperCamelCase_ , standard_warn=UpperCamelCase_ )
_snake_case : Dict = dict(scheduler.config )
_snake_case : Optional[int] = True
_snake_case : Union[str, Any] = FrozenDict(UpperCamelCase_ )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
segmentation_model=UpperCamelCase_ , segmentation_processor=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , safety_checker=UpperCamelCase_ , feature_extractor=UpperCamelCase_ , )
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_snake_case : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCamelCase_ )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
self.enable_attention_slicing(UpperCamelCase_ )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
_snake_case : Union[str, Any] = torch.device('cuda' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
if self.device != torch.device('meta' ) or not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : Union[str, Any] , UpperCamelCase : Union[str, List[str]] , UpperCamelCase : Union[torch.FloatTensor, PIL.Image.Image] , UpperCamelCase : str , UpperCamelCase : int = 5_12 , UpperCamelCase : int = 5_12 , UpperCamelCase : int = 50 , UpperCamelCase : float = 7.5 , UpperCamelCase : Optional[Union[str, List[str]]] = None , UpperCamelCase : Optional[int] = 1 , UpperCamelCase : float = 0.0 , UpperCamelCase : Optional[torch.Generator] = None , UpperCamelCase : Optional[torch.FloatTensor] = None , UpperCamelCase : Optional[str] = "pil" , UpperCamelCase : bool = True , UpperCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase : int = 1 , **UpperCamelCase : Any , ):
'''simple docstring'''
_snake_case : str = self.segmentation_processor(
text=[text] , images=[image] , padding='max_length' , return_tensors='pt' ).to(self.device )
_snake_case : Dict = self.segmentation_model(**UpperCamelCase_ )
_snake_case : Optional[int] = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
_snake_case : Union[str, Any] = self.numpy_to_pil(UpperCamelCase_ )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
_snake_case : Optional[Any] = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , height=UpperCamelCase_ , width=UpperCamelCase_ , num_inference_steps=UpperCamelCase_ , guidance_scale=UpperCamelCase_ , negative_prompt=UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_ , eta=UpperCamelCase_ , generator=UpperCamelCase_ , latents=UpperCamelCase_ , output_type=UpperCamelCase_ , return_dict=UpperCamelCase_ , callback=UpperCamelCase_ , callback_steps=UpperCamelCase_ , )
| 714 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Tuple ="""audio-spectrogram-transformer"""
def __init__( self : List[Any] , UpperCamelCase : Union[str, Any]=7_68 , UpperCamelCase : int=12 , UpperCamelCase : str=12 , UpperCamelCase : Tuple=30_72 , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : Any=0.0 , UpperCamelCase : Dict=0.0 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : Dict=1e-1_2 , UpperCamelCase : str=16 , UpperCamelCase : List[Any]=True , UpperCamelCase : Any=10 , UpperCamelCase : Optional[int]=10 , UpperCamelCase : int=10_24 , UpperCamelCase : Optional[Any]=1_28 , **UpperCamelCase : Optional[Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
_snake_case : Tuple = hidden_size
_snake_case : str = num_hidden_layers
_snake_case : Optional[Any] = num_attention_heads
_snake_case : Optional[Any] = intermediate_size
_snake_case : Optional[Any] = hidden_act
_snake_case : List[str] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Any = initializer_range
_snake_case : List[str] = layer_norm_eps
_snake_case : int = patch_size
_snake_case : List[str] = qkv_bias
_snake_case : int = frequency_stride
_snake_case : List[Any] = time_stride
_snake_case : List[Any] = max_length
_snake_case : List[str] = num_mel_bins
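# Hedged side note: with the defaults above, the spectrogram is tiled into
# patch_size x patch_size patches with strides (frequency_stride, time_stride),
# so the usual sliding-window count gives the number of patches the position
# embeddings must cover. The numbers below follow from the defaults only.
patch_size, frequency_stride, time_stride = 16, 10, 10
num_mel_bins, max_length = 128, 1024
freq_out = (num_mel_bins - patch_size) // frequency_stride + 1  # 12
time_out = (max_length - patch_size) // time_stride + 1         # 101
num_patches = freq_out * time_out                               # 1212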
| 669 | 0 |
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] = 1_00_00_00 )-> Any:
_snake_case : str = 1
_snake_case : Dict = 1
_snake_case : Union[str, Any] = {1: 1}
for inputa in range(2 , lowerCamelCase__ ):
_snake_case : Any = 0
_snake_case : Optional[int] = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
_snake_case : Optional[Any] = (3 * number) + 1
counter += 1
if inputa not in counters:
_snake_case : Union[str, Any] = counter
if counter > pre_counter:
_snake_case : Optional[int] = inputa
_snake_case : Dict = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
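# Hedged worked example of the chain lengths being memoized above: starting at
# 13 the sequence is 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1,
# a chain of 10 terms, so the cache would end up holding counters[13] == 10.
def chain_length(n: int) -> int:
    length = 1
    while n != 1:
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        length += 1
    return length

assert chain_length(13) == 10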
| 715 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowerCamelCase_ ( lowerCAmelCase: Tuple , lowerCAmelCase: bool = True , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: bool = False , lowerCAmelCase: float = 1_00 , lowerCAmelCase: float = 0.0_1 , lowerCAmelCase: float = 1 , )-> Any:
_snake_case : int = False
_snake_case : Any = search_prob
_snake_case : Tuple = start_temperate
_snake_case : Any = []
_snake_case : List[str] = 0
_snake_case : Optional[Any] = None
while not search_end:
_snake_case : List[Any] = current_state.score()
if best_state is None or current_score > best_state.score():
_snake_case : Dict = current_state
scores.append(lowerCAmelCase )
iterations += 1
_snake_case : Optional[int] = None
_snake_case : Union[str, Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
_snake_case : Dict = random.randint(0 , len(lowerCAmelCase ) - 1 ) # picking a random neighbor
_snake_case : int = neighbors.pop(lowerCAmelCase )
_snake_case : Union[str, Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_snake_case : Union[str, Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_snake_case : Union[str, Any] = picked_neighbor
else:
_snake_case : Optional[Any] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_snake_case : int = picked_neighbor
_snake_case : List[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_snake_case : List[str] = True
else:
_snake_case : Union[str, Any] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowerCAmelCase ) , lowerCAmelCase )
plt.xlabel('Iterations' )
plt.ylabel('Function values' )
plt.show()
return best_state
if __name__ == "__main__":
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: List[Any] )-> List[Any]:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Dict )-> Dict:
return (3 * x**2) - (6 * y)
lowerCAmelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
lowerCAmelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
| 669 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Tuple ="""decision_transformer"""
a_ : Dict =["""past_key_values"""]
a_ : Optional[int] ={
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Dict , UpperCamelCase : Union[str, Any]=17 , UpperCamelCase : int=4 , UpperCamelCase : List[str]=1_28 , UpperCamelCase : List[Any]=40_96 , UpperCamelCase : Dict=True , UpperCamelCase : Any=1 , UpperCamelCase : Optional[int]=10_24 , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : List[str]=None , UpperCamelCase : Any="relu" , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Union[str, Any]=0.1 , UpperCamelCase : Any=0.1 , UpperCamelCase : List[Any]=1e-5 , UpperCamelCase : Union[str, Any]=0.02 , UpperCamelCase : int=True , UpperCamelCase : Optional[int]=True , UpperCamelCase : List[Any]=5_02_56 , UpperCamelCase : List[Any]=5_02_56 , UpperCamelCase : Tuple=False , UpperCamelCase : Any=False , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
_snake_case : List[Any] = state_dim
_snake_case : int = act_dim
_snake_case : Optional[int] = hidden_size
_snake_case : Dict = max_ep_len
_snake_case : List[str] = action_tanh
_snake_case : Any = vocab_size
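        # the remaining fields mirror GPT-2 hyperparameters, since Decision Transformer runs on a GPT-2 backbone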
_snake_case : Tuple = n_positions
_snake_case : List[str] = n_layer
_snake_case : str = n_head
_snake_case : str = n_inner
_snake_case : int = activation_function
_snake_case : Tuple = resid_pdrop
_snake_case : List[Any] = embd_pdrop
_snake_case : int = attn_pdrop
_snake_case : Optional[Any] = layer_norm_epsilon
_snake_case : str = initializer_range
_snake_case : str = scale_attn_weights
_snake_case : List[Any] = use_cache
_snake_case : Optional[Any] = scale_attn_by_inverse_layer_idx
_snake_case : int = reorder_and_upcast_attn
_snake_case : Any = bos_token_id
_snake_case : Union[str, Any] = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **UpperCamelCase )
| 716 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : torch.FloatTensor
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self : str , UpperCamelCase : int = 32 , UpperCamelCase : int = 64 , UpperCamelCase : int = 20 , UpperCamelCase : int = 7_68 , UpperCamelCase : Optional[int]=77 , UpperCamelCase : int=4 , UpperCamelCase : float = 0.0 , UpperCamelCase : str = "silu" , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = "linear" , UpperCamelCase : Optional[str] = "prd" , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
_snake_case : str = num_attention_heads
_snake_case : Optional[int] = attention_head_dim
_snake_case : Any = num_attention_heads * attention_head_dim
_snake_case : List[Any] = additional_embeddings
_snake_case : List[str] = time_embed_dim or inner_dim
_snake_case : int = embedding_proj_dim or embedding_dim
_snake_case : List[Any] = clip_embed_dim or embedding_dim
_snake_case : Optional[Any] = Timesteps(UpperCamelCase , UpperCamelCase , 0 )
_snake_case : List[Any] = TimestepEmbedding(UpperCamelCase , UpperCamelCase , out_dim=UpperCamelCase , act_fn=UpperCamelCase )
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
if embedding_proj_norm_type is None:
_snake_case : str = None
elif embedding_proj_norm_type == "layer":
_snake_case : List[Any] = nn.LayerNorm(UpperCamelCase )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_snake_case : str = nn.Linear(UpperCamelCase , UpperCamelCase )
if encoder_hid_proj_type is None:
_snake_case : Any = None
elif encoder_hid_proj_type == "linear":
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase ) )
if added_emb_type == "prd":
_snake_case : str = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase ) )
elif added_emb_type is None:
_snake_case : Dict = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_snake_case : Optional[int] = nn.ModuleList(
[
BasicTransformerBlock(
UpperCamelCase , UpperCamelCase , UpperCamelCase , dropout=UpperCamelCase , activation_fn='gelu' , attention_bias=UpperCamelCase , )
for d in range(UpperCamelCase )
] )
if norm_in_type == "layer":
_snake_case : Optional[int] = nn.LayerNorm(UpperCamelCase )
elif norm_in_type is None:
_snake_case : Optional[Any] = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
_snake_case : Optional[Any] = nn.LayerNorm(UpperCamelCase )
_snake_case : Union[str, Any] = nn.Linear(UpperCamelCase , UpperCamelCase )
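        # additive causal mask: strictly upper-triangular entries are -10000 so a position cannot attend to the future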
_snake_case : List[Any] = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
_snake_case : Optional[Any] = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , UpperCamelCase , persistent=UpperCamelCase )
_snake_case : str = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = {}
def fn_recursive_add_processors(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Dict[str, AttentionProcessor] ):
if hasattr(UpperCamelCase , 'set_processor' ):
_snake_case : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return processors
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
'''simple docstring'''
_snake_case : Optional[int] = len(self.attn_processors.keys() )
if isinstance(UpperCamelCase , UpperCamelCase ) and len(UpperCamelCase ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(UpperCamelCase )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Union[str, Any] ):
if hasattr(UpperCamelCase , 'set_processor' ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
module.set_processor(UpperCamelCase )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Union[torch.Tensor, float, int] , UpperCamelCase : torch.FloatTensor , UpperCamelCase : Optional[torch.FloatTensor] = None , UpperCamelCase : Optional[torch.BoolTensor] = None , UpperCamelCase : bool = True , ):
'''simple docstring'''
_snake_case : Dict = hidden_states.shape[0]
_snake_case : str = timestep
if not torch.is_tensor(UpperCamelCase ):
_snake_case : Dict = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(UpperCamelCase ) and len(timesteps.shape ) == 0:
_snake_case : Tuple = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_snake_case : Optional[int] = timesteps * torch.ones(UpperCamelCase , dtype=timesteps.dtype , device=timesteps.device )
_snake_case : Union[str, Any] = self.time_proj(UpperCamelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_snake_case : Tuple = timesteps_projected.to(dtype=self.dtype )
_snake_case : List[Any] = self.time_embedding(UpperCamelCase )
if self.embedding_proj_norm is not None:
_snake_case : Optional[Any] = self.embedding_proj_norm(UpperCamelCase )
_snake_case : Union[str, Any] = self.embedding_proj(UpperCamelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_snake_case : Dict = self.encoder_hidden_states_proj(UpperCamelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_snake_case : str = self.proj_in(UpperCamelCase )
_snake_case : int = self.positional_embedding.to(hidden_states.dtype )
_snake_case : Optional[int] = []
_snake_case : List[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(UpperCamelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_snake_case : str = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_snake_case : str = hidden_states[:, None, :]
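        # assemble the token sequence: (encoder states,) projected embedding, time embedding, hidden states (, prd token)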
_snake_case : str = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_snake_case : int = self.prd_embedding.to(hidden_states.dtype ).expand(UpperCamelCase , -1 , -1 )
additional_embeds.append(UpperCamelCase )
_snake_case : Optional[int] = torch.cat(
UpperCamelCase , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_snake_case : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_snake_case : Optional[Any] = F.pad(
UpperCamelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_snake_case : Optional[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
_snake_case : Any = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
_snake_case : Tuple = F.pad(UpperCamelCase , (0, self.additional_embeddings) , value=0.0 )
_snake_case : int = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_snake_case : str = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_snake_case : Tuple = self.norm_in(UpperCamelCase )
for block in self.transformer_blocks:
_snake_case : Any = block(UpperCamelCase , attention_mask=UpperCamelCase )
_snake_case : Dict = self.norm_out(UpperCamelCase )
if self.prd_embedding is not None:
_snake_case : str = hidden_states[:, -1]
else:
_snake_case : Any = hidden_states[:, additional_embeddings_len:]
_snake_case : List[Any] = self.proj_to_clip_embeddings(UpperCamelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 669 | 0 |
'''simple docstring'''
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase : Any , UpperCamelCase : List[str]=13 , UpperCamelCase : List[str]=7 , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : int=True , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Optional[int]=32 , UpperCamelCase : Dict=5 , UpperCamelCase : Any=4 , UpperCamelCase : str=4 , UpperCamelCase : Dict="gelu" , UpperCamelCase : Optional[int]=0.0 , UpperCamelCase : Any=0.1 , UpperCamelCase : Dict=True , UpperCamelCase : List[Any]=5_12 , UpperCamelCase : Optional[int]=16 , UpperCamelCase : int=2 , UpperCamelCase : Any=0.02 , UpperCamelCase : Dict=3 , UpperCamelCase : List[str]=4 , UpperCamelCase : Any=None , ):
'''simple docstring'''
_snake_case : int = parent
_snake_case : List[Any] = batch_size
_snake_case : Optional[int] = seq_length
_snake_case : Tuple = is_training
_snake_case : Optional[int] = use_input_mask
_snake_case : Optional[Any] = use_token_type_ids
_snake_case : Union[str, Any] = use_labels
_snake_case : Union[str, Any] = vocab_size
_snake_case : Tuple = hidden_size
_snake_case : Union[str, Any] = num_hidden_layers
_snake_case : str = num_attention_heads
_snake_case : List[Any] = intermediate_multiple_size
_snake_case : Any = hidden_act
_snake_case : Tuple = hidden_dropout
_snake_case : int = attention_dropout
_snake_case : int = weight_tying
_snake_case : Union[str, Any] = max_position_embeddings
_snake_case : Union[str, Any] = type_vocab_size
_snake_case : int = type_sequence_label_size
_snake_case : List[str] = initializer_range
_snake_case : int = num_labels
_snake_case : Any = num_choices
_snake_case : Tuple = scope
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : Any = None
if self.use_input_mask:
_snake_case : str = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case : int = None
if self.use_labels:
_snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case : List[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case , _snake_case : str = self.prepare_config_and_inputs()
_snake_case : Any = True
return config, input_ids, input_mask, token_labels
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : int ):
'''simple docstring'''
_snake_case : str = GPTNeoXJapaneseModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case : Optional[int] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
_snake_case : int = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : List[str] ):
'''simple docstring'''
_snake_case : Union[str, Any] = True
_snake_case : Union[str, Any] = GPTNeoXJapaneseModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case : Union[str, Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : List[Any] ):
'''simple docstring'''
_snake_case : str = GPTNeoXJapaneseForCausalLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case : List[str] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : str , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = True
_snake_case : Any = GPTNeoXJapaneseForCausalLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
# first forward pass
_snake_case : int = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
_snake_case : Tuple = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
_snake_case : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case : Dict = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
_snake_case : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
_snake_case : Tuple = torch.cat([input_mask, next_mask] , dim=-1 )
_snake_case : str = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ )
_snake_case : Optional[Any] = output_from_no_past['hidden_states'][0]
_snake_case : Union[str, Any] = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , )['hidden_states'][0]
# select random slice
_snake_case : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_snake_case : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
_snake_case : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3 ) )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : Optional[int] = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case , _snake_case : Any = config_and_inputs
_snake_case : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
a_ : str =(GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
a_ : str =(GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
a_ : Dict =(
{'''feature-extraction''': GPTNeoXJapaneseModel, '''text-generation''': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
a_ : Any =False
a_ : List[Any] =False
a_ : Union[str, Any] =False
a_ : str =False
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : str = GPTNeoXJapaneseModelTester(self )
_snake_case : Optional[int] = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case , _snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case , _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
_snake_case : Dict = None
self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCAmelCase_ )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : List[Any] = 'abeja/gpt-neox-japanese-2.7b'
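        # Japanese prompts paired with the expected greedy completions from this pretrained checkpoint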
_snake_case : Union[str, Any] = ['データサイエンティストとは、', '100年後に必要とされる会社は、', 'フルリモートの環境で働くために必要なことは、', '国境の長いトンネルを抜けると', '美味しい日本食といえば、']
_snake_case : Tuple = [
'データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。',
'100年後に必要とされる会社は、「人」が中心の会社です。',
'フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。',
'国境の長いトンネルを抜けると、そこは雪国だった。',
'美味しい日本食といえば、やっぱりお寿司ですよね。',
]
_snake_case : Union[str, Any] = GPTNeoXJapaneseTokenizer.from_pretrained(lowerCAmelCase_ )
_snake_case : Union[str, Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(lowerCAmelCase_ )
_snake_case : Union[str, Any] = []
for prompt in prompts:
_snake_case : List[str] = tokenizer(lowerCAmelCase_ , return_tensors='pt' ).input_ids
_snake_case : List[str] = model.generate(lowerCAmelCase_ , max_length=50 )
_snake_case : List[str] = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
| 717 |
def lowerCamelCase_ ( lowerCAmelCase: int )-> int:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Union[str, Any] = F"""Input value of [number={number}] must be an integer"""
raise TypeError(lowerCAmelCase )
if number < 1:
_snake_case : int = F"""Input value of [number={number}] must be > 0"""
raise ValueError(lowerCAmelCase )
_snake_case : int = 1
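    # iteratively apply the Catalan recurrence: C(i) = C(i-1) * (4*i - 2) // (i + 1)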
for i in range(1 , lowerCAmelCase ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
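# lazy import structure: heavy submodules are only imported on first attribute access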
lowerCAmelCase_ = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['OwlViTFeatureExtractor']
lowerCAmelCase_ = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 718 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[Any] =VOCAB_FILES_NAMES
a_ : Tuple =PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[Any] =PRETRAINED_INIT_CONFIGURATION
a_ : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Any =LxmertTokenizer
def __init__( self : Any , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Dict=None , UpperCamelCase : List[str]=True , UpperCamelCase : List[str]="[UNK]" , UpperCamelCase : List[Any]="[SEP]" , UpperCamelCase : List[Any]="[PAD]" , UpperCamelCase : Optional[Any]="[CLS]" , UpperCamelCase : Optional[int]="[MASK]" , UpperCamelCase : Optional[int]=True , UpperCamelCase : str=None , **UpperCamelCase : List[str] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
_snake_case : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
_snake_case : List[Any] = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
_snake_case : Optional[int] = do_lower_case
_snake_case : Dict = strip_accents
_snake_case : Optional[int] = tokenize_chinese_chars
_snake_case : Optional[Any] = normalizer_class(**UpperCamelCase )
_snake_case : int = do_lower_case
def UpperCamelCase_ ( self : int , UpperCamelCase : List[str] , UpperCamelCase : str=None ):
'''simple docstring'''
_snake_case : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Tuple = [self.sep_token_id]
_snake_case : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : int , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : int = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
| 669 | 0 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] = 3 )-> qiskit.result.counts.Counts:
    if isinstance(lowerCAmelCase , str ):
        raise TypeError('number of qubits must be an integer.' )
if number_of_qubits <= 0:
raise ValueError('number of qubits must be > 0.' )
if math.floor(lowerCAmelCase ) != number_of_qubits:
raise ValueError('number of qubits must be exact integer.' )
if number_of_qubits > 10:
raise ValueError('number of qubits too large to simulate(>10).' )
_snake_case : str = QuantumRegister(lowerCAmelCase , 'qr' )
_snake_case : Union[str, Any] = ClassicalRegister(lowerCAmelCase , 'cr' )
_snake_case : Optional[int] = QuantumCircuit(lowerCAmelCase , lowerCAmelCase )
_snake_case : List[str] = number_of_qubits
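    # apply a Hadamard to each qubit, then controlled phase rotations with halving angles (the core of the QFT)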
for i in range(lowerCAmelCase ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(lowerCAmelCase ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , lowerCAmelCase , lowerCAmelCase )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(lowerCAmelCase , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(lowerCAmelCase , lowerCAmelCase )
# simulate with 10000 shots
_snake_case : List[str] = Aer.get_backend('qasm_simulator' )
_snake_case : Optional[int] = execute(lowerCAmelCase , lowerCAmelCase , shots=1_00_00 )
return job.result().get_counts(lowerCAmelCase )
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
| 719 |
from __future__ import annotations
from random import random
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase : int | None = None ):
'''simple docstring'''
_snake_case : str = value
_snake_case : List[Any] = random()
_snake_case : Node | None = None
_snake_case : Node | None = None
def __repr__( self : Optional[Any] ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return f"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = str(self.value ) + ' '
_snake_case : List[Any] = str(self.left or '' )
_snake_case : int = str(self.right or '' )
return value + left + right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> tuple[Node | None, Node | None]:
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
_snake_case , _snake_case : Optional[Any] = split(root.left , lowerCAmelCase )
return left, root
else:
_snake_case , _snake_case : List[str] = split(root.right , lowerCAmelCase )
return root, right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: Node | None )-> Node | None:
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
_snake_case : str = merge(left.right , lowerCAmelCase )
return left
else:
_snake_case : Union[str, Any] = merge(lowerCAmelCase , right.left )
return right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> Node | None:
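    # insert: split the treap around the new value, then merge (left, new node, right)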
_snake_case : Tuple = Node(lowerCAmelCase )
_snake_case , _snake_case : Optional[int] = split(lowerCAmelCase , lowerCAmelCase )
return merge(merge(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> Node | None:
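    # erase: split out every node equal to `value`, then merge the two remaining halves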
_snake_case , _snake_case : Optional[int] = split(lowerCAmelCase , value - 1 )
_snake_case , _snake_case : List[str] = split(lowerCAmelCase , lowerCAmelCase )
return merge(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Node | None )-> None:
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=',' )
inorder(root.right )
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: str )-> Node | None:
for arg in args.split():
if arg[0] == "+":
_snake_case : List[str] = insert(lowerCAmelCase , int(arg[1:] ) )
elif arg[0] == "-":
_snake_case : Any = erase(lowerCAmelCase , int(arg[1:] ) )
else:
print('Unknown command' )
return root
def lowerCamelCase_ ( )-> None:
_snake_case : Tuple = None
print(
'enter numbers to create a tree, + value to add value into treap, '
'- value to erase all nodes with value. \'q\' to quit. ' )
_snake_case : List[Any] = input()
while args != "q":
_snake_case : int = interact_treap(lowerCAmelCase , lowerCAmelCase )
print(lowerCAmelCase )
_snake_case : Tuple = input()
    print('goodbye!' )
if __name__ == "__main__":
import doctest
doctest.testmod()
    lowerCamelCase_()
| 669 | 0 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 720 |
from functools import reduce
lowerCAmelCase_ = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def lowerCamelCase_ ( lowerCAmelCase: str = N )-> int:
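    # slide a window of 13 adjacent digits across N and keep the largest digit product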
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda lowerCAmelCase , lowerCAmelCase : str(int(lowerCAmelCase ) * int(lowerCAmelCase ) ) , n[i : i + 13] ) )
for i in range(len(lowerCAmelCase ) - 12 ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 669 | 0 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def lowerCamelCase_ ( lowerCAmelCase: Any )-> Tuple:
_snake_case : str = args.pruning_method
_snake_case : Any = args.threshold
_snake_case : Union[str, Any] = args.model_name_or_path.rstrip('/' )
_snake_case : Dict = args.target_model_path
print(F"""Load fine-pruned model from {model_name_or_path}""" )
_snake_case : Dict = torch.load(os.path.join(lowerCAmelCase , 'pytorch_model.bin' ) )
_snake_case : Union[str, Any] = {}
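    # copy non-prunable tensors verbatim; binarize the rest according to the chosen pruning method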
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_snake_case : str = tensor
print(F"""Copied layer {name}""" )
elif "classifier" in name or "qa_output" in name:
_snake_case : List[Any] = tensor
print(F"""Copied layer {name}""" )
elif "bias" in name:
_snake_case : Tuple = tensor
print(F"""Copied layer {name}""" )
else:
if pruning_method == "magnitude":
_snake_case : Any = MagnitudeBinarizer.apply(inputs=lowerCAmelCase , threshold=lowerCAmelCase )
_snake_case : Union[str, Any] = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
_snake_case : List[Any] = name[:-6]
_snake_case : Any = model[F"""{prefix_}mask_scores"""]
_snake_case : Tuple = TopKBinarizer.apply(lowerCAmelCase , lowerCAmelCase )
_snake_case : List[str] = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_snake_case : str = name[:-6]
_snake_case : int = model[F"""{prefix_}mask_scores"""]
_snake_case : Optional[int] = ThresholdBinarizer.apply(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
_snake_case : List[str] = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_snake_case : Optional[int] = name[:-6]
_snake_case : str = model[F"""{prefix_}mask_scores"""]
            _snake_case , _snake_case : Dict = -0.1, 1.1
_snake_case : Union[str, Any] = torch.sigmoid(lowerCAmelCase )
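            # hard-concrete style gate: stretch the sigmoid output to [l, r], then clamp it to [0, 1]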
_snake_case : List[str] = s * (r - l) + l
_snake_case : Tuple = s_bar.clamp(min=0.0 , max=1.0 )
_snake_case : str = tensor * mask
print(F"""Pruned layer {name}""" )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
_snake_case : Optional[Any] = os.path.join(
os.path.dirname(lowerCAmelCase ) , F"""bertarized_{os.path.basename(lowerCAmelCase )}""" )
if not os.path.isdir(lowerCAmelCase ):
shutil.copytree(lowerCAmelCase , lowerCAmelCase )
print(F"""\nCreated folder {target_model_path}""" )
torch.save(lowerCAmelCase , os.path.join(lowerCAmelCase , 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
lowerCAmelCase_ = parser.parse_args()
    lowerCamelCase_(args)
| 721 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowerCamelCase_ ( )-> Any:
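    # tiny fixture: the first two files are near-duplicates, unit_test.py is distinct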
_snake_case : List[str] = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
}
_snake_case : Optional[Any] = Dataset.from_dict(lowerCAmelCase )
return dataset
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Union[str, Any] = get_dataset()
_snake_case : Tuple = make_duplicate_clusters(UpperCamelCase , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = get_dataset()
_snake_case , _snake_case : str = deduplicate_dataset(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , 2 )
print(UpperCamelCase )
self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , UpperCamelCase )
| 669 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCAmelCase_ = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 700 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] =["""image_processor""", """tokenizer"""]
a_ : Optional[int] ="""CLIPImageProcessor"""
a_ : Optional[Any] =("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[Any]=None , **UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : int = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase , )
_snake_case : Optional[Any] = kwargs.pop('feature_extractor' )
_snake_case : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCamelCase , UpperCamelCase )
def __call__( self : Dict , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Dict ):
'''simple docstring'''
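        # tokenize text and/or preprocess images; when both are given, pixel_values are attached to the text encoding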
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_snake_case : Optional[int] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if images is not None:
_snake_case : Optional[int] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is not None and images is not None:
_snake_case : Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Any = self.tokenizer.model_input_names
_snake_case : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 669 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a_ : List[Any] =inspect.getfile(accelerate.test_utils )
a_ : str =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_cli.py"""] )
a_ : Union[str, Any] =["accelerate", "launch"]
a_ : Tuple =Path.home() / ".cache/huggingface/accelerate"
a_ : Tuple ="default_config.yaml"
a_ : Dict =config_folder / config_file
a_ : List[str] =config_folder / "_default_config.yaml"
a_ : Tuple =Path("""tests/test_configs""" )
@classmethod
def UpperCamelCase_ ( cls : List[str] ):
'''simple docstring'''
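        # stash any existing user config so the tests run against a clean default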
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def UpperCamelCase_ ( cls : int ):
'''simple docstring'''
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
for config in sorted(self.test_config_path.glob('**/*.yaml' ) ):
with self.subTest(config_file=_lowerCamelCase ):
execute_subprocess_async(
self.base_cmd + ['--config_file', str(_lowerCamelCase ), self.test_file_path] , env=os.environ.copy() )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy() )
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a_ : Any ="test-tpu"
a_ : Optional[Any] ="us-central1-a"
a_ : Dict ="ls"
a_ : List[str] =["accelerate", "tpu-config"]
a_ : Optional[int] ="cd /usr/share"
a_ : Union[str, Any] ="tests/test_samples/test_command_file.sh"
a_ : Any ="Running gcloud compute tpus tpu-vm ssh"
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Any = run_command(
self.cmd
+ ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=_lowerCamelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowerCamelCase , )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : str = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command',
self.command,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=_lowerCamelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowerCamelCase , )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : List[str] = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=_lowerCamelCase )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowerCamelCase , )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : int = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=_lowerCamelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowerCamelCase , )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : str = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--command',
self.command,
'--command',
'echo "Hello World"',
'--debug',
] , return_stdout=_lowerCamelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _lowerCamelCase , )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : int = run_command(
self.cmd
+ ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=_lowerCamelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowerCamelCase , )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Optional[Any] = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command_file',
self.command_file,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=_lowerCamelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowerCamelCase , )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Dict = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=_lowerCamelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowerCamelCase , )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Optional[int] = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--install_accelerate',
'--accelerate_version',
'12.0.0',
'--debug',
] , return_stdout=_lowerCamelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowerCamelCase , )
| 701 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase_ = """http://www.mocksite.com/file1.txt"""
lowerCAmelCase_ = """\"text\": [\"foo\", \"foo\"]"""
lowerCAmelCase_ = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class _lowerCAmelCase :
'''simple docstring'''
a_ : int =200
a_ : List[str] ={"""Content-Length""": """100"""}
a_ : Tuple ={}
def UpperCamelCase_ ( self : Any , **UpperCamelCase : Any ):
'''simple docstring'''
return [bytes(UpperCamelCase , 'utf-8' )]
def lowerCamelCase_ ( *lowerCAmelCase: Tuple , **lowerCAmelCase: Tuple )-> str:
return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict )-> Optional[Any]:
import requests
monkeypatch.setattr(lowerCAmelCase , 'request' , lowerCAmelCase )
_snake_case : List[str] = URL
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[int] = url
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Any = [url]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': url}
_snake_case : int = 'dummy'
_snake_case : Optional[Any] = 'downloads'
_snake_case : Union[str, Any] = tmp_path
_snake_case : Dict = DownloadConfig(
cache_dir=os.path.join(lowerCAmelCase , lowerCAmelCase ) , use_etag=lowerCAmelCase , )
_snake_case : str = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Optional[int] = dl_manager.download(lowerCAmelCase )
_snake_case : Tuple = urls
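    # normalize str/list/dict download results into parallel lists before checking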
for downloaded_paths in [downloaded_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = [downloaded_paths]
_snake_case : List[str] = [urls]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in downloaded_paths.keys()
_snake_case : Any = downloaded_paths.values()
_snake_case : List[str] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowerCAmelCase , lowerCAmelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
_snake_case : str = Path(lowerCAmelCase )
_snake_case : int = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
_snake_case : List[str] = downloaded_path.read_text()
assert content == CONTENT
_snake_case : Any = downloaded_path.with_suffix('.json' )
assert metadata_downloaded_path.exists()
_snake_case : Tuple = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: Optional[int] , lowerCAmelCase: Any )-> str:
_snake_case : str = str(lowerCAmelCase )
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : str = filename
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[Any] = [filename]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': filename}
_snake_case : Any = 'dummy'
_snake_case : Union[str, Any] = xz_file.parent
_snake_case : int = 'extracted'
_snake_case : Union[str, Any] = DownloadConfig(
cache_dir=lowerCAmelCase , use_etag=lowerCAmelCase , )
_snake_case : List[str] = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Dict = dl_manager.extract(lowerCAmelCase )
_snake_case : Optional[int] = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[str] = [extracted_paths]
_snake_case : int = [paths]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in extracted_paths.keys()
_snake_case : Optional[int] = extracted_paths.values()
_snake_case : str = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowerCAmelCase , lowerCAmelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_snake_case : List[str] = Path(lowerCAmelCase )
_snake_case : Optional[Any] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowerCAmelCase , etag=lowerCAmelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_snake_case : Optional[int] = extracted_path.read_text()
_snake_case : int = text_file.read_text()
assert extracted_file_content == expected_file_content
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: List[Any] )-> Dict:
assert path.endswith('.jsonl' )
for num_items, line in enumerate(lowerCAmelCase , start=1 ):
_snake_case : Dict = json.loads(line.decode('utf-8' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: List[str] )-> Dict:
_snake_case : List[str] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: int )-> str:
_snake_case : List[Any] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[int] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_tar == 1
assert num_jsonl == 2
def lowerCamelCase_ ( lowerCAmelCase: Any )-> int:
_snake_case : Tuple = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowerCAmelCase ) , start=1 ):
assert os.path.basename(lowerCAmelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
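# --- Hedged sketch (added; not part of the original test module): the tests
# above reference module-level names URL, CONTENT, HASH and a `mock_request`
# helper that the original file defines elsewhere. A minimal stand-in could
# look like this; the URL and payload values are illustrative placeholders.
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = hash_url_to_filename(URL)  # with use_etag=False, the cached file is named after the URL hash


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return iter([bytes(CONTENT, "utf-8")])


def mock_request(method, url, **kwargs):
    return MockResponse()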
| 669 | 0 |
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 702 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class RobertaConfig(PretrainedConfig):
    """Configuration for RoBERTa models (BERT schema with RoBERTa's token-id defaults)."""

    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
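# --- Added usage sketch (illustrative, not part of the original module):
# config = RobertaConfig(num_hidden_layers=2)
# onnx_config = RobertaOnnxConfig(config, task="multiple-choice")
# onnx_config.inputs  # OrderedDict with dynamic batch/choice/sequence axes for ONNX export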
| 669 | 0 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    # Relax all edges leaving v in one search direction; if a neighbor was
    # already settled by the opposite search, try to improve the meeting cost.
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
graph_bwd = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
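# --- Added usage sketch (illustrative, not part of the original module):
# with the sample graphs above, the shortest E -> F distance is 3 (E -> G -> F):
#     bidirectional_dij("E", "F", graph_fwd, graph_bwd)  # -> 3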
| 703 |
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells

    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cell before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)

    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
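# --- Added demo (illustrative, not part of the original module):
if __name__ == "__main__":
    demo_highway = construct_highway(number_of_cells=10, frequency=3, initial_speed=2)
    history = simulate(demo_highway, number_of_update=5, probability=0.1, max_speed=5)
    for step in history:
        print(step)  # -1 marks an empty cell; other values are car speeds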
| 669 | 0 |
from typing import TYPE_CHECKING

from ...utils import _LazyModule

_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
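# --- Added usage note (illustrative): with the lazy module installed in
# sys.modules, `from transformers.models.wav2vec2_phoneme import
# Wav2Vec2PhonemeCTCTokenizer` only imports the submodule on first access.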
| 704 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    """Fast REALM tokenizer (backed by the `tokenizers` library); behaves like a
    BERT fast tokenizer plus candidate batch encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        # Always use a fixed sequence length so candidates can be stacked into a batch.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH

        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
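# --- Added usage sketch (illustrative, not part of the original module;
# requires network access to the Hub):
# tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
# batch = tokenizer.batch_encode_candidates(
#     [["Hello world!", "Nice to meet you!"]], max_length=10, return_tensors="np"
# )
# batch["input_ids"].shape  # (num_examples, num_candidates, max_length)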
| 669 | 0 |
import logging
import os
import threading
import time

try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None

# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError

# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"

_logger = None


def logger():
    """Return the module-level logger, creating it lazily."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    """Context-manager proxy returned by ``BaseFileLock.acquire``."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None


class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path


class WindowsFileLock(BaseFileLock):
    """Uses msvcrt.locking() to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    """Uses fcntl.flock() to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
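# --- Added usage sketch (illustrative, not part of the original module):
# FileLock resolves above to the platform-appropriate implementation (on
# Windows this module expects to live inside its parent package).
if __name__ == "__main__":
    lock = FileLock("demo.txt.lock", timeout=5)
    with lock:  # acquired here, released on exit; nested acquires are counted
        with open("demo.txt", "a") as f:
            f.write("guarded write\n")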
| 705 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}


def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
lowerCAmelCase_ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
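# --- Added usage sketch (illustrative, not part of the original script; the
# script filename here is an assumption):
# python convert_slow_tokenizers_checkpoints_to_fast.py \
#     --tokenizer_name BertTokenizer \
#     --checkpoint_name bert-base-uncased \
#     --dump_path ./fast_tokenizers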
| 669 | 0 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
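# --- Added usage sketch (illustrative, not part of the original module):
# text_cfg = OwlViTTextConfig()
# vision_cfg = OwlViTVisionConfig()
# config = OwlViTConfig.from_text_vision_configs(text_cfg.to_dict(), vision_cfg.to_dict())
# config.projection_dim  # 512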
| 706 |
def base16_encode(data: bytes) -> str:
    """Encode bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 string back into bytes."""
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
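# --- Added usage sketch (illustrative; the encode/decode names above were
# chosen descriptively and are an assumption):
if __name__ == "__main__":
    encoded = base16_encode(b"Hello World!")  # '48656C6C6F20576F726C6421'
    assert base16_decode(encoded) == b"Hello World!"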
| 669 | 0 |
def exchange_sort(numbers: list[int]) -> list[int]:
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
| 707 |
import csv
import tweepy
# Twitter API credentials
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
def lowerCamelCase_ ( lowerCAmelCase: str )-> None:
# authorize twitter, initialize tweepy
_snake_case : Optional[Any] = tweepy.OAuthHandler(lowerCAmelCase , lowerCAmelCase )
auth.set_access_token(lowerCAmelCase , lowerCAmelCase )
_snake_case : List[Any] = tweepy.API(lowerCAmelCase )
# initialize a list to hold all the tweepy Tweets
_snake_case : Any = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_snake_case : List[str] = api.user_timeline(screen_name=lowerCAmelCase , count=2_00 )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# save the id of the oldest tweet less one
_snake_case : List[Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowerCAmelCase ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
_snake_case : Tuple = api.user_timeline(
screen_name=lowerCAmelCase , count=2_00 , max_id=lowerCAmelCase )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# update the id of the oldest tweet less one
_snake_case : List[str] = alltweets[-1].id - 1
print(F"""...{len(lowerCAmelCase )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
_snake_case : int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""" , 'w' ) as f:
_snake_case : Any = csv.writer(lowerCAmelCase )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(lowerCAmelCase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 669 | 0 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    """Configuration for the download manager (field names as in `datasets.DownloadConfig`)."""

    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
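# --- Added usage sketch (illustrative, not part of the original module):
if __name__ == "__main__":
    config = DownloadConfig(cache_dir="./cache", max_retries=3)
    clone = config.copy()  # deep copy: mutating the clone leaves the original intact
    assert clone.max_retries == 3 and clone is not config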
| 669 | 0 |
from __future__ import annotations


def ceil_index(v, l, r, key):  # noqa: E741
    # Binary search for the smallest index in v[l+1 .. r] whose value is >= key.
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value starts a fresh candidate subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling element of an existing candidate
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
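# --- Added usage sketch (illustrative; the function names above were chosen
# descriptively and are an assumption):
if __name__ == "__main__":
    sample = [2, 5, 3, 7, 11, 8, 10, 13, 6]
    assert longest_increasing_subsequence_length(sample) == 6  # e.g. [2, 3, 7, 8, 10, 13]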
| 709 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow

if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"

if is_tf_available():

    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs


@require_tf
@require_keras_nlp
class GPT2TokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
            loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
| 669 | 0 |
import math
import flax.linen as nn
import jax.numpy as jnp
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Any = 1 , lowerCAmelCase: Any = 1 , lowerCAmelCase: int = 1.0E4 , lowerCAmelCase: Optional[int] = False , lowerCAmelCase: Optional[Any] = 1.0 , )-> jnp.ndarray:
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even"""
_snake_case : Tuple = float(embedding_dim // 2 )
_snake_case : List[str] = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
_snake_case : Optional[int] = min_timescale * jnp.exp(jnp.arange(lowerCAmelCase , dtype=jnp.floataa ) * -log_timescale_increment )
_snake_case : Optional[Any] = jnp.expand_dims(lowerCAmelCase , 1 ) * jnp.expand_dims(lowerCAmelCase , 0 )
# scale embeddings
_snake_case : int = scale * emb
if flip_sin_to_cos:
_snake_case : List[Any] = jnp.concatenate([jnp.cos(lowerCAmelCase ), jnp.sin(lowerCAmelCase )] , axis=1 )
else:
_snake_case : int = jnp.concatenate([jnp.sin(lowerCAmelCase ), jnp.cos(lowerCAmelCase )] , axis=1 )
_snake_case : str = jnp.reshape(lowerCAmelCase , [jnp.shape(lowerCAmelCase )[0], embedding_dim] )
return signal
class _lowerCAmelCase ( nn.Module ):
'''simple docstring'''
a_ : int =32
a_ : jnp.dtype =jnp.floataa
@nn.compact
def __call__( self : Union[str, Any] , UpperCamelCase : List[Any] ):
'''simple docstring'''
_snake_case : List[str] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_1' )(lowerCamelCase_ )
_snake_case : Any = nn.silu(lowerCamelCase_ )
_snake_case : Optional[Any] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_2' )(lowerCamelCase_ )
return temb
class _lowerCAmelCase ( nn.Module ):
'''simple docstring'''
a_ : int =32
a_ : bool =False
a_ : float =1
@nn.compact
def __call__( self : Dict , UpperCamelCase : Optional[int] ):
'''simple docstring'''
return get_sinusoidal_embeddings(
lowerCamelCase_ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
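# --- Added illustration (hypothetical helper, not part of the original file) ---
# The embedding above written out for one concrete case: embedding_dim = 8,
# min_timescale = 1, max_timescale = 1e4, freq_shift = 1, flip_sin_to_cos = False.
# Each timestep t maps to [sin(t * w_0)..sin(t * w_3), cos(t * w_0)..cos(t * w_3)]
# with geometrically spaced frequencies w_k = exp(-k * log(1e4) / (4 - 1)).
def _demo_sinusoidal(timesteps: jnp.ndarray) -> jnp.ndarray:
    half = 4  # embedding_dim // 2
    freqs = jnp.exp(jnp.arange(half, dtype=jnp.float32) * -(math.log(1e4) / (half - 1)))
    args = timesteps[:, None] * freqs[None, :]
    return jnp.concatenate([jnp.sin(args), jnp.cos(args)], axis=1)  # (len(timesteps), 8)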
| 710 |
def lowerCamelCase_ ( lowerCAmelCase: int )-> list:
_snake_case : List[Any] = int(lowerCAmelCase )
if n_element < 1:
        _snake_case : int = ValueError('n_element should be a positive number' )
raise my_error
_snake_case : Union[str, Any] = [1]
_snake_case , _snake_case , _snake_case : Any = (0, 0, 0)
_snake_case : str = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
lowerCAmelCase_ = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
lowerCAmelCase_ = hamming(int(n))
print("""-----------------------------------------------------""")
print(F"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 669 | 0 |
lowerCAmelCase_ = {
0: """0""",
1: """1""",
2: """2""",
3: """3""",
4: """4""",
5: """5""",
6: """6""",
7: """7""",
8: """8""",
9: """9""",
10: """a""",
11: """b""",
12: """c""",
13: """d""",
14: """e""",
15: """f""",
}
def lowerCamelCase_ ( lowerCAmelCase: float )-> Dict:
assert type(lowerCAmelCase ) in (int, float) and decimal == int(lowerCAmelCase )
_snake_case : List[str] = int(lowerCAmelCase )
_snake_case : Optional[Any] = ''
_snake_case : Optional[Any] = False
if decimal < 0:
_snake_case : Dict = True
decimal *= -1
while decimal > 0:
_snake_case , _snake_case : str = divmod(lowerCAmelCase , 16 )
_snake_case : Union[str, Any] = values[remainder] + hexadecimal
_snake_case : Any = '0x' + hexadecimal
if negative:
_snake_case : Union[str, Any] = '-' + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Tuple="shi-labs/oneformer_demo" )-> Any:
with open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type='dataset' ) , 'r' ) as f:
_snake_case : str = json.load(lowerCAmelCase )
_snake_case : List[str] = {}
_snake_case : Optional[Any] = []
_snake_case : Optional[Any] = []
for key, info in class_info.items():
_snake_case : Optional[int] = info['name']
class_names.append(info['name'] )
if info["isthing"]:
thing_ids.append(int(lowerCAmelCase ) )
_snake_case : List[str] = thing_ids
_snake_case : Optional[Any] = class_names
return metadata
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any=7 , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : Dict=30 , UpperCamelCase : int=4_00 , UpperCamelCase : List[str]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : str=True , UpperCamelCase : Any=[0.5, 0.5, 0.5] , UpperCamelCase : int=[0.5, 0.5, 0.5] , UpperCamelCase : Dict=10 , UpperCamelCase : Dict=False , UpperCamelCase : Dict=2_55 , UpperCamelCase : Dict="shi-labs/oneformer_demo" , UpperCamelCase : Optional[int]="ade20k_panoptic.json" , UpperCamelCase : Tuple=10 , ):
'''simple docstring'''
_snake_case : Optional[Any] = parent
_snake_case : Union[str, Any] = batch_size
_snake_case : Tuple = num_channels
_snake_case : List[str] = min_resolution
_snake_case : List[str] = max_resolution
_snake_case : Optional[Any] = do_resize
_snake_case : Optional[Any] = {'shortest_edge': 32, 'longest_edge': 13_33} if size is None else size
_snake_case : Optional[int] = do_normalize
_snake_case : Any = image_mean
_snake_case : List[Any] = image_std
_snake_case : Any = class_info_file
_snake_case : List[str] = prepare_metadata(UpperCamelCase , UpperCamelCase )
_snake_case : Any = num_text
_snake_case : str = repo_path
# for the post_process_functions
_snake_case : Optional[Any] = 2
_snake_case : str = 10
_snake_case : Union[str, Any] = 10
_snake_case : List[Any] = 3
_snake_case : str = 4
_snake_case : List[Any] = num_labels
_snake_case : str = do_reduce_labels
_snake_case : List[str] = ignore_index
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any]=False ):
'''simple docstring'''
if not batched:
_snake_case : Any = image_inputs[0]
if isinstance(UpperCamelCase , Image.Image ):
_snake_case , _snake_case : Any = image.size
else:
_snake_case , _snake_case : Any = image.shape[1], image.shape[2]
if w < h:
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * h / w )
_snake_case : Any = self.size['shortest_edge']
elif w > h:
_snake_case : int = self.size['shortest_edge']
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * w / h )
else:
_snake_case : Dict = self.size['shortest_edge']
_snake_case : Dict = self.size['shortest_edge']
else:
_snake_case : List[Any] = []
for image in image_inputs:
_snake_case , _snake_case : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_snake_case : List[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0]
_snake_case : Optional[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1]
return expected_height, expected_width
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Tuple =OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
a_ : Any =image_processing_class
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = OneFormerImageProcessorTester(self )
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(UpperCamelCase , 'image_std' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'size' ) )
self.assertTrue(hasattr(UpperCamelCase , 'ignore_index' ) )
self.assertTrue(hasattr(UpperCamelCase , 'class_info_file' ) )
self.assertTrue(hasattr(UpperCamelCase , 'num_text' ) )
self.assertTrue(hasattr(UpperCamelCase , 'repo_path' ) )
self.assertTrue(hasattr(UpperCamelCase , 'metadata' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_reduce_labels' ) )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
_snake_case : Optional[Any] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : List[Any] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : int = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
_snake_case : int = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Union[str, Any] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : Optional[int] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
_snake_case : Optional[int] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : List[str] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Tuple=False , UpperCamelCase : str=False , UpperCamelCase : Dict="np" ):
'''simple docstring'''
_snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_snake_case : List[str] = self.image_processing_tester.num_labels
_snake_case : Optional[int] = None
_snake_case : str = None
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
if with_segmentation_maps:
_snake_case : Optional[int] = num_labels
if is_instance_map:
_snake_case : Union[str, Any] = list(range(UpperCamelCase ) ) * 2
_snake_case : Tuple = dict(enumerate(UpperCamelCase ) )
_snake_case : Union[str, Any] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
_snake_case : int = [Image.fromarray(UpperCamelCase ) for annotation in annotations]
_snake_case : List[Any] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , UpperCamelCase , return_tensors='pt' , instance_id_to_semantic_id=UpperCamelCase , pad_and_return_pixel_mask=UpperCamelCase , )
return inputs
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
def common(UpperCamelCase : Any=False , UpperCamelCase : int=None ):
_snake_case : Any = self.comm_get_image_processor_inputs(
with_segmentation_maps=UpperCamelCase , is_instance_map=UpperCamelCase , segmentation_type=UpperCamelCase )
_snake_case : Union[str, Any] = inputs['mask_labels']
_snake_case : Optional[int] = inputs['class_labels']
_snake_case : Optional[int] = inputs['pixel_values']
_snake_case : Optional[Any] = inputs['text_inputs']
# check the batch_size
for mask_label, class_label, text_input in zip(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
                # this ensures padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(UpperCamelCase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=UpperCamelCase )
common(is_instance_map=UpperCamelCase , segmentation_type='pil' )
common(is_instance_map=UpperCamelCase , segmentation_type='pil' )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = np.zeros((20, 50) )
_snake_case : int = 1
_snake_case : int = 1
_snake_case : Optional[Any] = 1
_snake_case : List[Any] = binary_mask_to_rle(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
        _snake_case : Any = image_processor.post_process_semantic_segmentation(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
_snake_case : Optional[Any] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        _snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(UpperCamelCase , target_sizes=UpperCamelCase )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Any = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : int = image_processor.post_process_instance_segmentation(UpperCamelCase , threshold=0 )
self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : str = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : Any = image_processor.post_process_panoptic_segmentation(UpperCamelCase , threshold=0 )
self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
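# --- Added illustration (hypothetical helper, not part of the test suite) ---
# A plain-numpy sketch of the run-length encoding the binary_mask_to_rle test
# above asserts: (1-indexed start, run length) pairs for each run of 1s in the
# flattened mask. A mask whose first run of ones starts at flat index 20 and
# spans 45 pixels therefore encodes as [21, 45, ...], matching the asserts.
def _demo_binary_mask_to_rle(mask):
    pixels = np.concatenate([[0], np.asarray(mask).flatten(), [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1  # 1-indexed change points
    runs[1::2] -= runs[::2]  # turn (start, end) change points into (start, length)
    return list(runs)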
| 669 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase : Tuple , ):
'''simple docstring'''
_snake_case : Optional[Any] = parent
_snake_case : int = 13
_snake_case : int = 7
_snake_case : str = True
_snake_case : Optional[Any] = True
_snake_case : str = True
_snake_case : List[str] = True
_snake_case : Tuple = True
_snake_case : Optional[int] = False
_snake_case : int = False
_snake_case : List[Any] = False
_snake_case : Tuple = 2
_snake_case : Optional[int] = 99
_snake_case : str = 0
_snake_case : int = 32
_snake_case : Union[str, Any] = 2
_snake_case : int = 4
_snake_case : Tuple = 0.1
_snake_case : Optional[Any] = 0.1
_snake_case : Union[str, Any] = 5_12
_snake_case : Tuple = 16
_snake_case : List[str] = 2
_snake_case : Dict = 0.02
_snake_case : Union[str, Any] = 3
_snake_case : Optional[int] = 4
_snake_case : Dict = 'last'
_snake_case : Optional[Any] = True
_snake_case : Optional[Any] = None
_snake_case : List[Any] = 0
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : int = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
_snake_case : Optional[int] = None
if self.use_input_lengths:
_snake_case : List[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_snake_case : Dict = None
if self.use_token_type_ids:
_snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_snake_case : List[Any] = None
_snake_case : Union[str, Any] = None
_snake_case : int = None
if self.use_labels:
_snake_case : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case : List[str] = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
_snake_case : List[str] = ids_tensor([self.batch_size] , self.num_choices )
_snake_case : Optional[Any] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
_snake_case : Optional[Any] = TFFlaubertModel(config=lowerCAmelCase_ )
_snake_case : Optional[int] = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
_snake_case : Optional[Any] = model(lowerCAmelCase_ )
_snake_case : List[Any] = [input_ids, input_mask]
_snake_case : List[Any] = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : Tuple , UpperCamelCase : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : str , ):
'''simple docstring'''
_snake_case : str = TFFlaubertWithLMHeadModel(lowerCAmelCase_ )
_snake_case : Optional[int] = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
_snake_case : Union[str, Any] = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , ):
'''simple docstring'''
_snake_case : Optional[Any] = TFFlaubertForQuestionAnsweringSimple(lowerCAmelCase_ )
_snake_case : Optional[int] = {'input_ids': input_ids, 'lengths': input_lengths}
_snake_case : Optional[Any] = model(lowerCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : str , UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : str , ):
'''simple docstring'''
_snake_case : List[str] = TFFlaubertForSequenceClassification(lowerCAmelCase_ )
_snake_case : Optional[int] = {'input_ids': input_ids, 'lengths': input_lengths}
_snake_case : Optional[Any] = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self : Any , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : int , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Tuple , ):
'''simple docstring'''
_snake_case : Dict = self.num_labels
_snake_case : Tuple = TFFlaubertForTokenClassification(config=lowerCAmelCase_ )
_snake_case : Dict = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_snake_case : int = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int , ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.num_choices
_snake_case : Optional[int] = TFFlaubertForMultipleChoice(config=lowerCAmelCase_ )
_snake_case : str = tf.tile(tf.expand_dims(lowerCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
_snake_case : List[str] = tf.tile(tf.expand_dims(lowerCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
_snake_case : List[Any] = tf.tile(tf.expand_dims(lowerCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
_snake_case : List[str] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
_snake_case : Dict = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
_snake_case : Tuple = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
a_ : str =(
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
a_ : Any =(
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
a_ : Union[str, Any] =(
{
"""feature-extraction""": TFFlaubertModel,
"""fill-mask""": TFFlaubertWithLMHeadModel,
"""question-answering""": TFFlaubertForQuestionAnsweringSimple,
"""text-classification""": TFFlaubertForSequenceClassification,
"""token-classification""": TFFlaubertForTokenClassification,
"""zero-shot""": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ : List[str] =False
a_ : List[str] =False
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Any , UpperCamelCase : Any ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = TFFlaubertModelTester(self )
_snake_case : Dict = ConfigTester(self , config_class=lowerCAmelCase_ , emb_dim=37 )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCAmelCase_ )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCAmelCase_ )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCAmelCase_ )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCAmelCase_ )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*lowerCAmelCase_ )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*lowerCAmelCase_ )
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : List[str] = TFFlaubertModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_tf
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
_snake_case : Dict = tf.convert_to_tensor(
[[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
_snake_case : Dict = model(lowerCAmelCase_ )[0]
_snake_case : List[Any] = tf.TensorShape((1, 8, 5_12) )
self.assertEqual(output.shape , lowerCAmelCase_ )
# compare the actual values for a slice.
_snake_case : Any = tf.convert_to_tensor(
[
[
[-1.8_76_87_73, -1.56_65_55, 0.27_07_24_18],
[-1.6_92_00_38, -0.5_87_35_05, 1.9_32_95_99],
[-2.9_56_39_85, -1.6_99_38_35, 1.7_97_20_52],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 712 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCAmelCase_ = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def lowerCamelCase_ ( )-> Tuple:
_snake_case : int = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_snake_case : int = get_sagemaker_input()
else:
_snake_case : Any = get_cluster_input()
return config
def lowerCamelCase_ ( lowerCAmelCase: str=None )-> Any:
if subparsers is not None:
_snake_case : List[Any] = subparsers.add_parser('config' , description=lowerCAmelCase )
else:
_snake_case : Dict = argparse.ArgumentParser('Accelerate config command' , description=lowerCAmelCase )
parser.add_argument(
'--config_file' , default=lowerCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase )
return parser
def lowerCamelCase_ ( lowerCAmelCase: Any )-> Any:
_snake_case : Dict = get_user_input()
if args.config_file is not None:
_snake_case : List[str] = args.config_file
else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
_snake_case : Union[str, Any] = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(lowerCAmelCase )
else:
config.to_yaml_file(lowerCAmelCase )
print(F"""accelerate configuration saved at {config_file}""" )
def lowerCamelCase_ ( )-> Dict:
_snake_case : List[str] = config_command_parser()
_snake_case : str = parser.parse_args()
config_command(lowerCAmelCase )
if __name__ == "__main__":
main()
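# --- Added usage note (not part of the original module) ---
# Typical invocations of the subcommand defined above:
#     accelerate config                          # interactive prompts, default path
#     accelerate config --config_file my.yaml    # save answers to an explicit path
# As implemented in config_command, a path ending in .json is written with
# to_json_file and anything else with to_yaml_file.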
| 669 | 0 |
import os
def lowerCamelCase_ ( )-> Union[str, Any]:
    # Sums the integers listed one per line in num.txt (next to this file) and
    # returns the first ten digits of the total (Project Euler problem 13).
    _snake_case : List[Any] = os.path.join(os.path.dirname(__file__) , 'num.txt' )
with open(a_ ) as file_hand:
return str(sum(int(a_ ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 713 |
# Function to print upper half of diamond (pyramid)
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] )-> List[str]:
for i in range(0 , lowerCAmelCase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] )-> List[Any]:
for i in range(lowerCAmelCase , 0 , -1 ):
for _ in range(lowerCAmelCase , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def lowerCamelCase_ ( lowerCAmelCase: Tuple )-> int:
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(lowerCAmelCase ) # upper half
reverse_floyd(lowerCAmelCase ) # lower half
if __name__ == "__main__":
print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
lowerCAmelCase_ = 1
while K:
lowerCAmelCase_ = int(input("""enter the number and , and see the magic : """))
print()
pretty_print(user_number)
lowerCAmelCase_ = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 669 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
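# --- Added note (not part of the original file) ---
# The _LazyModule pattern above keeps importing this package cheap: the heavy
# torch/TF classes listed in _import_structure are only really imported the
# first time one of their names is accessed on the module.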
| 714 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Tuple ="""audio-spectrogram-transformer"""
def __init__( self : List[Any] , UpperCamelCase : Union[str, Any]=7_68 , UpperCamelCase : int=12 , UpperCamelCase : str=12 , UpperCamelCase : Tuple=30_72 , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : Any=0.0 , UpperCamelCase : Dict=0.0 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : Dict=1e-1_2 , UpperCamelCase : str=16 , UpperCamelCase : List[Any]=True , UpperCamelCase : Any=10 , UpperCamelCase : Optional[int]=10 , UpperCamelCase : int=10_24 , UpperCamelCase : Optional[Any]=1_28 , **UpperCamelCase : Optional[Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
_snake_case : Tuple = hidden_size
_snake_case : str = num_hidden_layers
_snake_case : Optional[Any] = num_attention_heads
_snake_case : Optional[Any] = intermediate_size
_snake_case : Optional[Any] = hidden_act
_snake_case : List[str] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Any = initializer_range
_snake_case : List[str] = layer_norm_eps
_snake_case : int = patch_size
_snake_case : List[str] = qkv_bias
_snake_case : int = frequency_stride
_snake_case : List[Any] = time_stride
_snake_case : List[Any] = max_length
_snake_case : List[str] = num_mel_bins
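# --- Added illustration (not part of the original config) ---
# With the defaults above the input spectrogram is max_length x num_mel_bins =
# 1024 x 128, cut into 16x16 patches at strides (time_stride=10,
# frequency_stride=10). Following the usual overlapping-patch arithmetic that
# gives (1024 - 16) // 10 + 1 = 101 time positions and (128 - 16) // 10 + 1 = 12
# frequency positions, i.e. 101 * 12 = 1212 patches.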
| 669 | 0 |
from __future__ import annotations
def lowerCamelCase_ ( lowerCAmelCase: list[int] , lowerCAmelCase: list[int] , lowerCAmelCase: list[int] , lowerCAmelCase: list[list[str]] , lowerCAmelCase: int , )-> None:
_snake_case : Optional[int] = len(UpperCAmelCase__ )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(UpperCAmelCase__ ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
        # 45º diagonal:  y - x = b, i.e. row - col = b
        # 135º diagonal: y + x = b, i.e. row + col = b
        # (a one-line predicate form of this check is sketched at the end of this file)
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , UpperCAmelCase__ , UpperCAmelCase__ , )
def lowerCamelCase_ ( lowerCAmelCase: int )-> None:
_snake_case : Optional[int] = []
depth_first_search([] , [] , [] , UpperCAmelCase__ , UpperCAmelCase__ )
# Print all the boards
for board in boards:
for column in board:
print(UpperCAmelCase__ )
print('' )
print(len(UpperCAmelCase__ ) , 'solutions were found.' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
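# --- Added illustration (hypothetical helper, not part of the original file) ---
# The two collision formulas tracked above, folded into one predicate: queens at
# (ra, ca) and (rb, cb) share a diagonal exactly when their row - col values
# match (45º diagonals) or their row + col values match (135º diagonals).
def _demo_same_diagonal(ra: int, ca: int, rb: int, cb: int) -> bool:
    return ra - ca == rb - cb or ra + ca == rb + cb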
| 715 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowerCamelCase_ ( lowerCAmelCase: Tuple , lowerCAmelCase: bool = True , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: bool = False , lowerCAmelCase: float = 1_00 , lowerCAmelCase: float = 0.0_1 , lowerCAmelCase: float = 1 , )-> Any:
_snake_case : int = False
_snake_case : Any = search_prob
_snake_case : Tuple = start_temperate
_snake_case : Any = []
_snake_case : List[str] = 0
_snake_case : Optional[Any] = None
while not search_end:
_snake_case : List[Any] = current_state.score()
if best_state is None or current_score > best_state.score():
_snake_case : Dict = current_state
scores.append(lowerCAmelCase )
iterations += 1
_snake_case : Optional[int] = None
_snake_case : Union[str, Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
        ):  # keep sampling until we find a neighbor we can move to, or run out
_snake_case : Dict = random.randint(0 , len(lowerCAmelCase ) - 1 ) # picking a random neighbor
_snake_case : int = neighbors.pop(lowerCAmelCase )
_snake_case : Union[str, Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_snake_case : Union[str, Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_snake_case : Union[str, Any] = picked_neighbor
else:
_snake_case : Optional[Any] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_snake_case : int = picked_neighbor
_snake_case : List[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_snake_case : List[str] = True
else:
_snake_case : Union[str, Any] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowerCAmelCase ) , lowerCAmelCase )
plt.xlabel('Iterations' )
plt.ylabel('Function values' )
plt.show()
return best_state
if __name__ == "__main__":
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: List[Any] )-> List[Any]:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Dict )-> Dict:
return (3 * x**2) - (6 * y)
lowerCAmelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
lowerCAmelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
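# --- Added worked example (hypothetical helper, not part of the original script) ---
# The Metropolis-style rule used above: a move that worsens the score
# (change < 0 after the find_max sign flip) is still accepted with probability
# e ** (change / current_temp), so bad moves pass often while the temperature is
# high and ever more rarely as current_temp decays by rate_of_decrease.
def _demo_acceptance_probability(change: float, current_temp: float) -> float:
    return math.e ** (change / current_temp)  # e.g. change=-2.0, temp=100 -> ~0.98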
| 669 | 0 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class _lowerCAmelCase ( ctypes.Structure ):
'''simple docstring'''
a_ : int =[("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)]
def lowerCamelCase_ ( )-> Any:
if os.name == "nt":
_snake_case : int = CursorInfo()
_snake_case : int = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(_UpperCamelCase , ctypes.byref(_UpperCamelCase ) )
_snake_case : List[str] = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(_UpperCamelCase , ctypes.byref(_UpperCamelCase ) )
elif os.name == "posix":
sys.stdout.write('\033[?25l' )
sys.stdout.flush()
def lowerCamelCase_ ( )-> Optional[Any]:
if os.name == "nt":
_snake_case : Dict = CursorInfo()
_snake_case : Optional[int] = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(_UpperCamelCase , ctypes.byref(_UpperCamelCase ) )
_snake_case : Dict = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(_UpperCamelCase , ctypes.byref(_UpperCamelCase ) )
elif os.name == "posix":
sys.stdout.write('\033[?25h' )
sys.stdout.flush()
@contextmanager
def lowerCamelCase_ ( )-> Any:
try:
hide_cursor()
yield
finally:
show_cursor()
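# --- Added usage sketch (not part of the original module) ---
# hide_cursor() and show_cursor() are paired by the @contextmanager above, whose
# try/finally restores the cursor even if the body raises:
#
#     with hidden_cursor():  # hypothetical name for the context manager above
#         run_long_task()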
| 716 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : torch.FloatTensor
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self : str , UpperCamelCase : int = 32 , UpperCamelCase : int = 64 , UpperCamelCase : int = 20 , UpperCamelCase : int = 7_68 , UpperCamelCase : Optional[int]=77 , UpperCamelCase : int=4 , UpperCamelCase : float = 0.0 , UpperCamelCase : str = "silu" , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = "linear" , UpperCamelCase : Optional[str] = "prd" , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
_snake_case : str = num_attention_heads
_snake_case : Optional[int] = attention_head_dim
_snake_case : Any = num_attention_heads * attention_head_dim
_snake_case : List[Any] = additional_embeddings
_snake_case : List[str] = time_embed_dim or inner_dim
_snake_case : int = embedding_proj_dim or embedding_dim
_snake_case : List[Any] = clip_embed_dim or embedding_dim
_snake_case : Optional[Any] = Timesteps(UpperCamelCase , UpperCamelCase , 0 )
_snake_case : List[Any] = TimestepEmbedding(UpperCamelCase , UpperCamelCase , out_dim=UpperCamelCase , act_fn=UpperCamelCase )
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
if embedding_proj_norm_type is None:
_snake_case : str = None
elif embedding_proj_norm_type == "layer":
_snake_case : List[Any] = nn.LayerNorm(UpperCamelCase )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_snake_case : str = nn.Linear(UpperCamelCase , UpperCamelCase )
if encoder_hid_proj_type is None:
_snake_case : Any = None
elif encoder_hid_proj_type == "linear":
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase ) )
if added_emb_type == "prd":
_snake_case : str = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase ) )
elif added_emb_type is None:
_snake_case : Dict = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_snake_case : Optional[int] = nn.ModuleList(
[
BasicTransformerBlock(
UpperCamelCase , UpperCamelCase , UpperCamelCase , dropout=UpperCamelCase , activation_fn='gelu' , attention_bias=UpperCamelCase , )
for d in range(UpperCamelCase )
] )
if norm_in_type == "layer":
_snake_case : Optional[int] = nn.LayerNorm(UpperCamelCase )
elif norm_in_type is None:
_snake_case : Optional[Any] = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
_snake_case : Optional[Any] = nn.LayerNorm(UpperCamelCase )
_snake_case : Union[str, Any] = nn.Linear(UpperCamelCase , UpperCamelCase )
_snake_case : List[Any] = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
_snake_case : Optional[Any] = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , UpperCamelCase , persistent=UpperCamelCase )
_snake_case : str = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = {}
def fn_recursive_add_processors(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Dict[str, AttentionProcessor] ):
if hasattr(UpperCamelCase , 'set_processor' ):
_snake_case : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return processors
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
'''simple docstring'''
_snake_case : Optional[int] = len(self.attn_processors.keys() )
if isinstance(UpperCamelCase , UpperCamelCase ) and len(UpperCamelCase ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(UpperCamelCase )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Union[str, Any] ):
if hasattr(UpperCamelCase , 'set_processor' ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
module.set_processor(UpperCamelCase )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Union[torch.Tensor, float, int] , UpperCamelCase : torch.FloatTensor , UpperCamelCase : Optional[torch.FloatTensor] = None , UpperCamelCase : Optional[torch.BoolTensor] = None , UpperCamelCase : bool = True , ):
'''simple docstring'''
_snake_case : Dict = hidden_states.shape[0]
_snake_case : str = timestep
if not torch.is_tensor(UpperCamelCase ):
_snake_case : Dict = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(UpperCamelCase ) and len(timesteps.shape ) == 0:
_snake_case : Tuple = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_snake_case : Optional[int] = timesteps * torch.ones(UpperCamelCase , dtype=timesteps.dtype , device=timesteps.device )
_snake_case : Union[str, Any] = self.time_proj(UpperCamelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_snake_case : Tuple = timesteps_projected.to(dtype=self.dtype )
_snake_case : List[Any] = self.time_embedding(UpperCamelCase )
if self.embedding_proj_norm is not None:
_snake_case : Optional[Any] = self.embedding_proj_norm(UpperCamelCase )
_snake_case : Union[str, Any] = self.embedding_proj(UpperCamelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_snake_case : Dict = self.encoder_hidden_states_proj(UpperCamelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_snake_case : str = self.proj_in(UpperCamelCase )
_snake_case : int = self.positional_embedding.to(hidden_states.dtype )
_snake_case : Optional[int] = []
_snake_case : List[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(UpperCamelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_snake_case : str = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_snake_case : str = hidden_states[:, None, :]
_snake_case : str = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_snake_case : int = self.prd_embedding.to(hidden_states.dtype ).expand(UpperCamelCase , -1 , -1 )
additional_embeds.append(UpperCamelCase )
_snake_case : Optional[int] = torch.cat(
UpperCamelCase , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_snake_case : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_snake_case : Optional[Any] = F.pad(
UpperCamelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_snake_case : Optional[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
_snake_case : Any = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
_snake_case : Tuple = F.pad(UpperCamelCase , (0, self.additional_embeddings) , value=0.0 )
_snake_case : int = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_snake_case : str = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_snake_case : Tuple = self.norm_in(UpperCamelCase )
for block in self.transformer_blocks:
_snake_case : Any = block(UpperCamelCase , attention_mask=UpperCamelCase )
_snake_case : Dict = self.norm_out(UpperCamelCase )
if self.prd_embedding is not None:
_snake_case : str = hidden_states[:, -1]
else:
_snake_case : Any = hidden_states[:, additional_embeddings_len:]
_snake_case : List[Any] = self.proj_to_clip_embeddings(UpperCamelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
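# --- Added illustration (not part of the original module) ---
# The causal_attention_mask registered in __init__ above is an additive bias:
# torch.full((n, n), -10000.0).triu_(1) keeps -10000.0 strictly above the main
# diagonal and zeros everywhere else, e.g. for n = 3:
#     [[     0.0, -10000.0, -10000.0],
#      [     0.0,      0.0, -10000.0],
#      [     0.0,      0.0,      0.0]]
# so each query position can attend only to itself and earlier positions.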
| 669 | 0 |
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
lowerCAmelCase_ = """src/transformers"""
lowerCAmelCase_ = """docs/source/en/tasks"""
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: str )-> Tuple:
with open(lowerCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
_snake_case : Any = f.readlines()
# Find the start prompt.
_snake_case : List[Any] = 0
while not lines[start_index].startswith(lowerCAmelCase ):
start_index += 1
start_index += 1
_snake_case : int = start_index
while not lines[end_index].startswith(lowerCAmelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase_ = direct_transformers_import(TRANSFORMERS_PATH)
lowerCAmelCase_ = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
lowerCAmelCase_ = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] )-> Any:
    model_mapping_names = TASK_GUIDE_TO_MODELS[lowerCAmelCase]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(lowerCAmelCase , set() )
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
}
return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Dict=False )-> Union[str, Any]:
_snake_case , _snake_case , _snake_case , _snake_case : Tuple = _find_text_in_file(
filename=os.path.join(lowerCAmelCase , lowerCAmelCase ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , )
_snake_case : Optional[Any] = get_model_list_for_task(lowerCAmelCase )
if current_list != new_list:
if overwrite:
with open(os.path.join(lowerCAmelCase , lowerCAmelCase ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
' to fix this.' )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
lowerCAmelCase_ = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 717 |
def lowerCamelCase_ ( number: int )-> int:
    if not isinstance(number , int ):
        msg = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if number < 1:
        msg = F"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg )
    current_number = 1
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
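# The loop implements the Catalan recurrence C(n) = C(n-1) * (4n - 2) // (n + 1), so
# (as a hedged sanity check) successive inputs should give the familiar sequence:
# >>> [lowerCamelCase_(k) for k in range(1, 7)]
# [1, 1, 2, 5, 14, 42]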
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 | 0 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots( a: int , b: int , c: int )-> tuple:
    if a == 0:
        raise ValueError('Coefficient \'a\' must not be zero.' )
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta )) / (2 * a)
    root_2 = (-b - sqrt(delta )) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
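# Worked examples for the function above (a hedged sanity check, not part of the
# original script): a negative discriminant keeps the roots complex via cmath.sqrt,
# while a positive one collapses them to their real parts.
# >>> quadratic_roots(1, -3, 2)
# (2.0, 1.0)
# >>> quadratic_roots(1, 2, 5)
# ((-1+2j), (-1-2j))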
def main( )-> None:
    solution_1 , solution_2 = quadratic_roots(a=5 , b=6 , c=1 )
    print(F"""The solutions are: {solution_1} and {solution_2}""" )
if __name__ == "__main__":
main()
| 718 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[Any] =VOCAB_FILES_NAMES
a_ : Tuple =PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[Any] =PRETRAINED_INIT_CONFIGURATION
a_ : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Any =LxmertTokenizer
def __init__( self : Any , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Dict=None , UpperCamelCase : List[str]=True , UpperCamelCase : List[str]="[UNK]" , UpperCamelCase : List[Any]="[SEP]" , UpperCamelCase : List[Any]="[PAD]" , UpperCamelCase : Optional[Any]="[CLS]" , UpperCamelCase : Optional[int]="[MASK]" , UpperCamelCase : Optional[int]=True , UpperCamelCase : str=None , **UpperCamelCase : List[str] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
_snake_case : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
_snake_case : List[Any] = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
_snake_case : Optional[int] = do_lower_case
_snake_case : Dict = strip_accents
_snake_case : Optional[int] = tokenize_chinese_chars
_snake_case : Optional[Any] = normalizer_class(**UpperCamelCase )
_snake_case : int = do_lower_case
def UpperCamelCase_ ( self : int , UpperCamelCase : List[str] , UpperCamelCase : str=None ):
'''simple docstring'''
_snake_case : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Tuple = [self.sep_token_id]
_snake_case : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
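        # Shape sketch for the mask built above, for a hypothetical pair (sequence A of
        # length 2, sequence B of length 3):
        #   [CLS] A A [SEP] B B B [SEP]  ->  [0, 0, 0, 0, 1, 1, 1, 1]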
def UpperCamelCase_ ( self : int , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : int = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
| 669 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCAmelCase_ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""")
lowerCAmelCase_ = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
lowerCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def lowerCamelCase_ ( lowerCAmelCase: str )-> int:
with open(snake_case_ , 'rb' ) as f:
_snake_case : List[Any] = Image.open(snake_case_ )
return im.convert('RGB' )
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : List[str] =field(
default=UpperCamelCase_ , metadata={
"""help""": """Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."""
} , )
a_ : Optional[Any] =field(
default=UpperCamelCase_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
a_ : Optional[int] =field(default=UpperCamelCase_ , metadata={"""help""": """A folder containing the training data."""} )
a_ : Optional[Any] =field(default=UpperCamelCase_ , metadata={"""help""": """A folder containing the validation data."""} )
a_ : Union[str, Any] =field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
a_ : List[Any] =field(
default=UpperCamelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
a_ : Optional[Any] =field(
default=UpperCamelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'You must specify either a dataset name from the hub or a train and/or validation directory.' )
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : Optional[int] =field(
default="""google/vit-base-patch16-224-in21k""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
a_ : int =field(
default=UpperCamelCase_ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(UpperCamelCase_ )} , )
a_ : Dict =field(
default=UpperCamelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a_ : Dict =field(
default=UpperCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
a_ : Dict =field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
a_ : Optional[Any] =field(default=UpperCamelCase_ , metadata={"""help""": """Name or path of preprocessor config."""} )
a_ : Union[str, Any] =field(
default=UpperCamelCase_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
a_ : Optional[int] =field(
default=UpperCamelCase_ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def lowerCamelCase_ ( lowerCAmelCase: Any )-> Union[str, Any]:
_snake_case : List[str] = torch.stack([example['pixel_values'] for example in examples] )
_snake_case : List[Any] = torch.tensor([example['labels'] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def lowerCamelCase_ ( )-> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_snake_case : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_snake_case : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_snake_case : Optional[int] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_image_classification' , snake_case_ , snake_case_ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_snake_case : Tuple = training_args.get_process_log_level()
logger.setLevel(snake_case_ )
transformers.utils.logging.set_verbosity(snake_case_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_snake_case : int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_snake_case : int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
_snake_case : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='image-classification' , use_auth_token=True if model_args.use_auth_token else None , )
else:
_snake_case : Any = {}
if data_args.train_dir is not None:
_snake_case : str = os.path.join(data_args.train_dir , '**' )
if data_args.validation_dir is not None:
_snake_case : Tuple = os.path.join(data_args.validation_dir , '**' )
_snake_case : int = load_dataset(
'imagefolder' , data_files=snake_case_ , cache_dir=model_args.cache_dir , task='image-classification' , )
# If we don't have a validation split, split off a percentage of train as validation.
_snake_case : Tuple = None if '''validation''' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , snake_case_ ) and data_args.train_val_split > 0.0:
_snake_case : Optional[int] = dataset['''train'''].train_test_split(data_args.train_val_split )
_snake_case : List[Any] = split['''train''']
_snake_case : List[Any] = split['''test''']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_snake_case : int = dataset['''train'''].features['''labels'''].names
_snake_case : List[str] = {}, {}
for i, label in enumerate(snake_case_ ):
_snake_case : Optional[Any] = str(snake_case_ )
_snake_case : Optional[int] = label
# Load the accuracy metric from the datasets package
_snake_case : List[Any] = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCAmelCase: Tuple ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
_snake_case : int = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(snake_case_ ) , labelaid=snake_case_ , idalabel=snake_case_ , finetuning_task='image-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_snake_case : int = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=snake_case_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
_snake_case : Optional[int] = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
_snake_case : Optional[Any] = image_processor.size['''shortest_edge''']
else:
_snake_case : Optional[Any] = (image_processor.size['''height'''], image_processor.size['''width'''])
_snake_case : int = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
_snake_case : Tuple = Compose(
[
RandomResizedCrop(snake_case_ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
_snake_case : Union[str, Any] = Compose(
[
Resize(snake_case_ ),
CenterCrop(snake_case_ ),
ToTensor(),
normalize,
] )
def train_transforms(lowerCAmelCase: Any ):
_snake_case : Any = [
_train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['''image''']
]
return example_batch
def val_transforms(lowerCAmelCase: int ):
_snake_case : int = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['''image''']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
_snake_case : Optional[Any] = (
dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(snake_case_ )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
_snake_case : List[str] = (
dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(snake_case_ )
    # Initialize our trainer
_snake_case : Optional[Any] = Trainer(
model=snake_case_ , args=snake_case_ , train_dataset=dataset['train'] if training_args.do_train else None , eval_dataset=dataset['validation'] if training_args.do_eval else None , compute_metrics=snake_case_ , tokenizer=snake_case_ , data_collator=snake_case_ , )
# Training
if training_args.do_train:
_snake_case : List[str] = None
if training_args.resume_from_checkpoint is not None:
_snake_case : str = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_snake_case : List[Any] = last_checkpoint
_snake_case : str = trainer.train(resume_from_checkpoint=snake_case_ )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_snake_case : Optional[int] = trainer.evaluate()
trainer.log_metrics('eval' , snake_case_ )
trainer.save_metrics('eval' , snake_case_ )
# Write model card and (optionally) push to hub
_snake_case : List[Any] = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''image-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''image-classification''', '''vision'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case_ )
else:
trainer.create_model_card(**snake_case_ )
if __name__ == "__main__":
main()
| 719 |
from __future__ import annotations
from random import random
class Node :
'''simple docstring'''
    def __init__( self : Dict , value: int | None = None ):
        '''simple docstring'''
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self : Optional[Any] ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return f"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self : Dict ):
'''simple docstring'''
        value = str(self.value ) + ' '
        left = str(self.left or '' )
        right = str(self.right or '' )
        return value + left + right
def split( root: Node | None , value: int )-> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left , root.left = split(root.left , value )
            return left, root
        else:
            root.right , right = split(root.right , value )
            return root, right
def merge( left: Node | None , right: Node | None )-> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right , right )
        return left
    else:
        right.left = merge(left , right.left )
        return right
def insert( root: Node | None , value: int )-> Node | None:
    node = Node(value )
    left , right = split(root , value )
    return merge(merge(left , node ) , right )
def erase( root: Node | None , value: int )-> Node | None:
    left , right = split(root , value - 1 )
    _ , right = split(right , value )
    return merge(left , right )
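# A hedged sanity check of the split/erase contract above (helper names below are new,
# not part of the original module): split(root, v) partitions keys into (<= v, > v),
# and erase(v) stitches split(v - 1)[0] to split(v)[1], dropping the band of keys equal
# to v. The tree *shape* depends on the random priorities, but the in-order key
# sequence does not.
def _inorder_keys( root: Node | None )-> list:
    return [] if root is None else _inorder_keys(root.left ) + [root.value] + _inorder_keys(root.right )
def _split_erase_demo( )-> None:
    root: Node | None = None
    for key in (1, 3, 3, 5):
        root = insert(root , key )
    assert _inorder_keys(root ) == [1, 3, 3, 5]
    assert _inorder_keys(erase(root , 3 ) ) == [1, 5]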
def inorder( root: Node | None )-> None:
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=',' )
inorder(root.right )
def interact_treap( root: Node | None , args: str )-> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root , int(arg[1:] ) )
        elif arg[0] == "-":
            root = erase(root , int(arg[1:] ) )
else:
print('Unknown command' )
return root
def main( )-> None:
    root = None
    print(
        'enter numbers to create a tree, + value to add value into treap, '
        '- value to erase all nodes with value. \'q\' to quit. ' )
    args = input()
    while args != "q":
        root = interact_treap(root , args )
        print(root )
        args = input()
    print('goodbye!' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 669 | 0 |
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase_ = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] , lowerCAmelCase: str )-> Optional[int]:
for item in items:
if any(marker in item.keywords for marker in ['integration', 'unit'] ):
continue
item.add_marker(pytest.mark.unit )
def lowerCamelCase_ ( lowerCAmelCase: Dict )-> str:
config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=UpperCamelCase__ )
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: List[Any] )-> List[str]:
_snake_case : Optional[Any] = tmp_path_factory.getbasetemp() / 'cache'
_snake_case : str = test_hf_cache_home / 'datasets'
_snake_case : Optional[int] = test_hf_cache_home / 'metrics'
_snake_case : List[str] = test_hf_cache_home / 'modules'
monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(UpperCamelCase__ ) )
monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(UpperCamelCase__ ) )
monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(UpperCamelCase__ ) )
_snake_case : Tuple = test_hf_datasets_cache / 'downloads'
monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(UpperCamelCase__ ) )
_snake_case : Any = test_hf_datasets_cache / 'downloads' / 'extracted'
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(UpperCamelCase__ ) )
@pytest.fixture(autouse=UpperCamelCase__ , scope='session' )
def lowerCamelCase_ ( )-> List[str]:
datasets.disable_progress_bar()
@pytest.fixture(autouse=UpperCamelCase__ )
def lowerCamelCase_ ( lowerCAmelCase: Any )-> Union[str, Any]:
monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , UpperCamelCase__ )
@pytest.fixture
def lowerCamelCase_ ( lowerCAmelCase: str )-> Dict:
monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , UpperCamelCase__ )
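# All of the fixtures above lean on the same pytest idiom: `monkeypatch.setattr`
# replaces a module-level config value for the duration of one test, and
# `autouse=True` applies the fixture everywhere without explicit opt-in. A minimal
# standalone analogue (the patched attribute here is only illustrative):
# >>> def test_reads_patched_cache(monkeypatch, tmp_path):
# ...     monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(tmp_path))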
| 720 |
from functools import reduce
lowerCAmelCase_ = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution( n: str = lowerCAmelCase_ )-> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
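# Toy-scale cross-check of the same sliding-window idea (3-digit windows instead of 13
# so the arithmetic is easy to eyeball): in "123454321" the best window is "454".
# >>> max(int(a) * int(b) * int(c) for a, b, c in zip("123454321", "123454321"[1:], "123454321"[2:]))
# 80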
if __name__ == "__main__":
print(F"""{solution() = }""")
| 669 | 0 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Union[str, Any] =StableUnCLIPPipeline
a_ : int =TEXT_TO_IMAGE_PARAMS
a_ : str =TEXT_TO_IMAGE_BATCH_PARAMS
a_ : Optional[Any] =TEXT_TO_IMAGE_IMAGE_PARAMS
a_ : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
a_ : Optional[Any] =False
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : int = 32
_snake_case : Tuple = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
_snake_case : Union[str, Any] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase , projection_dim=UpperCamelCase , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
_snake_case : str = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=UpperCamelCase , num_layers=1 , )
torch.manual_seed(0 )
_snake_case : Tuple = DDPMScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=10_00 , clip_sample=UpperCamelCase , clip_sample_range=5.0 , beta_schedule='squaredcos_cap_v2' , )
# regular denoising components
torch.manual_seed(0 )
_snake_case : int = StableUnCLIPImageNormalizer(embedding_dim=UpperCamelCase )
_snake_case : List[str] = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
_snake_case : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
_snake_case : List[str] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
_snake_case : int = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCamelCase , layers_per_block=1 , upcast_attention=UpperCamelCase , use_linear_projection=UpperCamelCase , )
torch.manual_seed(0 )
_snake_case : List[str] = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type='v_prediction' , set_alpha_to_one=UpperCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
_snake_case : Tuple = AutoencoderKL()
_snake_case : Optional[Any] = {
# prior components
'prior_tokenizer': prior_tokenizer,
'prior_text_encoder': prior_text_encoder,
'prior': prior,
'prior_scheduler': prior_scheduler,
# image noising components
'image_normalizer': image_normalizer,
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder,
'unet': unet,
'scheduler': scheduler,
'vae': vae,
}
return components
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Tuple , UpperCamelCase : Tuple=0 ):
'''simple docstring'''
if str(UpperCamelCase ).startswith('mps' ):
_snake_case : Union[str, Any] = torch.manual_seed(UpperCamelCase )
else:
_snake_case : Tuple = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
_snake_case : Any = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'prior_num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : int = torch_device == 'cpu'
self._test_attention_slicing_forward_pass(test_max_difference=UpperCamelCase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : str = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=UpperCamelCase )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' )
_snake_case : int = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_snake_case : Dict = torch.Generator(device='cpu' ).manual_seed(0 )
_snake_case : Tuple = pipe('anime turle' , generator=UpperCamelCase , output_type='np' )
_snake_case : List[str] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_snake_case : List[str] = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
_snake_case : Union[str, Any] = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_snake_case : Union[str, Any] = pipe(
'anime turtle' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='np' , )
_snake_case : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 721 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowerCamelCase_ ( )-> Any:
_snake_case : List[str] = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
}
_snake_case : Optional[Any] = Dataset.from_dict(lowerCAmelCase )
return dataset
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Union[str, Any] = get_dataset()
_snake_case : Tuple = make_duplicate_clusters(UpperCamelCase , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = get_dataset()
_snake_case , _snake_case : str = deduplicate_dataset(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , 2 )
print(UpperCamelCase )
self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , UpperCamelCase )
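# The 0.85 used above is the MinHash similarity threshold, an approximation of Jaccard
# similarity between token sets. A back-of-the-envelope check on the fixture (hedged:
# real MinHash works on hashed shingles, this only shows why the first two files match):
# >>> a, b = set(("a " * 20).split()), set(("a " * 30).split())
# >>> len(a & b) / len(a | b)
# 1.0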
| 669 | 0 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Any ="""char"""
a_ : Union[str, Any] ="""bpe"""
a_ : Optional[Any] ="""wp"""
lowerCAmelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[str] =["""image_processor""", """char_tokenizer"""]
a_ : Tuple ="""ViTImageProcessor"""
a_ : Optional[int] ="""MgpstrTokenizer"""
def __init__( self : Optional[Any] , UpperCamelCase : Dict=None , UpperCamelCase : Tuple=None , **UpperCamelCase : List[str] ):
'''simple docstring'''
_snake_case : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase , )
_snake_case : List[Any] = kwargs.pop('feature_extractor' )
_snake_case : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
_snake_case : Tuple = tokenizer
_snake_case : Union[str, Any] = AutoTokenizer.from_pretrained('gpt2' )
_snake_case : List[str] = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(UpperCamelCase , UpperCamelCase )
def __call__( self : int , UpperCamelCase : Any=None , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Any=None , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
_snake_case : Union[str, Any] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is not None:
_snake_case : Optional[Any] = self.char_tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
_snake_case : Dict = encodings['input_ids']
return inputs
def UpperCamelCase_ ( self : Any , UpperCamelCase : List[str] ):
'''simple docstring'''
_snake_case : Any = sequences
_snake_case : int = char_preds.size(0 )
_snake_case : Optional[Any] = self._decode_helper(UpperCamelCase , 'char' )
_snake_case : List[Any] = self._decode_helper(UpperCamelCase , 'bpe' )
_snake_case : str = self._decode_helper(UpperCamelCase , 'wp' )
_snake_case : List[Any] = []
_snake_case : Union[str, Any] = []
for i in range(UpperCamelCase ):
_snake_case : Optional[Any] = [char_scores[i], bpe_scores[i], wp_scores[i]]
_snake_case : List[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]]
_snake_case : List[str] = scores.index(max(UpperCamelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
_snake_case : Optional[Any] = {}
_snake_case : Optional[int] = final_strs
_snake_case : List[str] = final_scores
_snake_case : Tuple = char_strs
_snake_case : Tuple = bpe_strs
_snake_case : List[Any] = wp_strs
return out
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : List[str] , UpperCamelCase : Tuple ):
'''simple docstring'''
if format == DecodeType.CHARACTER:
_snake_case : Optional[Any] = self.char_decode
_snake_case : Dict = 1
_snake_case : Optional[Any] = '[s]'
elif format == DecodeType.BPE:
_snake_case : Any = self.bpe_decode
_snake_case : int = 2
_snake_case : Union[str, Any] = '#'
elif format == DecodeType.WORDPIECE:
_snake_case : Optional[Any] = self.wp_decode
_snake_case : int = 1_02
_snake_case : str = '[SEP]'
else:
raise ValueError(f"""Format {format} is not supported.""" )
_snake_case : int = [], []
_snake_case : Optional[int] = pred_logits.size(0 )
_snake_case : List[str] = pred_logits.size(1 )
_snake_case : Dict = pred_logits.topk(1 , dim=-1 , largest=UpperCamelCase , sorted=UpperCamelCase )
_snake_case : str = preds_index.view(-1 , UpperCamelCase )[:, 1:]
_snake_case : Union[str, Any] = decoder(UpperCamelCase )
_snake_case : Optional[Any] = torch.nn.functional.softmax(UpperCamelCase , dim=2 ).max(dim=2 )
_snake_case : Any = preds_max_prob[:, 1:]
for index in range(UpperCamelCase ):
_snake_case : List[str] = preds_str[index].find(UpperCamelCase )
_snake_case : Tuple = preds_str[index][:pred_eos]
_snake_case : str = preds_index[index].cpu().tolist()
_snake_case : int = pred_index.index(UpperCamelCase ) if eos_token in pred_index else -1
_snake_case : Tuple = preds_max_prob[index][: pred_eos_index + 1]
_snake_case : int = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(UpperCamelCase )
conf_scores.append(UpperCamelCase )
return dec_strs, conf_scores
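    # The confidence score returned above is the running product of the per-step max
    # softmax probabilities (the last element of cumprod). With illustrative numbers:
    # >>> torch.tensor([0.9, 0.8, 0.95]).cumprod(dim=0)[-1]
    # tensor(0.6840)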
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : Optional[Any] = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(UpperCamelCase )]
return decode_strs
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(UpperCamelCase )
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : List[str] = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(UpperCamelCase )]
return decode_strs
| 700 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] =["""image_processor""", """tokenizer"""]
a_ : Optional[int] ="""CLIPImageProcessor"""
a_ : Optional[Any] =("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[Any]=None , **UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : int = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase , )
_snake_case : Optional[Any] = kwargs.pop('feature_extractor' )
_snake_case : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCamelCase , UpperCamelCase )
def __call__( self : Dict , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Dict ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_snake_case : Optional[int] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if images is not None:
_snake_case : Optional[int] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is not None and images is not None:
_snake_case : Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Any = self.tokenizer.model_input_names
_snake_case : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 669 | 0 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Optional[Any] =CodeGenTokenizer
a_ : Optional[int] =CodeGenTokenizerFast
a_ : Optional[int] =True
a_ : Union[str, Any] ={"""add_prefix_space""": True}
a_ : int =False
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_snake_case : Tuple = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
_snake_case : Any = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
_snake_case : Dict = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_snake_case : Optional[int] = {'unk_token': '<unk>'}
_snake_case : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_snake_case : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCamelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCamelCase ) )
def UpperCamelCase_ ( self : Union[str, Any] , **UpperCamelCase : str ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase_ ( self : int , UpperCamelCase : str ):
'''simple docstring'''
_snake_case : Optional[Any] = 'lower newer'
_snake_case : int = 'lower newer'
return input_text, output_text
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Any = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_snake_case : Any = 'lower newer'
_snake_case : str = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
_snake_case : List[Any] = tokenizer.tokenize(UpperCamelCase , add_prefix_space=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
_snake_case : Any = tokens + [tokenizer.unk_token]
_snake_case : Any = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_snake_case : Any = self.get_tokenizer()
_snake_case : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=UpperCamelCase )
_snake_case : Optional[int] = 'lower newer'
# Testing tokenization
_snake_case : Union[str, Any] = tokenizer.tokenize(UpperCamelCase , add_prefix_space=UpperCamelCase )
_snake_case : Union[str, Any] = rust_tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
# Testing conversion to ids without special tokens
_snake_case : Optional[Any] = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase , add_prefix_space=UpperCamelCase )
_snake_case : str = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
# Testing conversion to ids with special tokens
_snake_case : Tuple = self.get_rust_tokenizer(add_prefix_space=UpperCamelCase )
_snake_case : str = tokenizer.encode(UpperCamelCase , add_prefix_space=UpperCamelCase )
_snake_case : Union[str, Any] = rust_tokenizer.encode(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
# Testing the unknown token
_snake_case : Union[str, Any] = tokens + [rust_tokenizer.unk_token]
_snake_case : int = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Any , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : Dict=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
# Simple input
_snake_case : Union[str, Any] = 'This is a simple input'
_snake_case : Dict = ['This is a simple input 1', 'This is a simple input 2']
_snake_case : Optional[Any] = ('This is a simple input', 'This is a pair')
_snake_case : int = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' )
# Simple input
self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' )
# Simple input
self.assertRaises(
UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' , )
# Pair input
self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' )
# Pair input
self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' )
# Pair input
self.assertRaises(
UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' , )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Any = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
_snake_case : Optional[int] = 'This is a simple input'
_snake_case : Optional[Any] = ['This is a simple input looooooooong', 'This is a simple input']
_snake_case : int = ('This is a simple input', 'This is a pair')
_snake_case : List[str] = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
_snake_case : Union[str, Any] = tokenizer.pad_token_id
_snake_case : Any = tokenizer(UpperCamelCase , padding='max_length' , max_length=30 , return_tensors='np' )
_snake_case : Dict = tokenizer(UpperCamelCase , padding=UpperCamelCase , truncate=UpperCamelCase , return_tensors='np' )
_snake_case : Optional[Any] = tokenizer(*UpperCamelCase , padding='max_length' , max_length=60 , return_tensors='np' )
_snake_case : Optional[Any] = tokenizer(UpperCamelCase , padding=UpperCamelCase , truncate=UpperCamelCase , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : str = '$$$'
_snake_case : Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=UpperCamelCase , add_bos_token=UpperCamelCase )
_snake_case : Tuple = 'This is a simple input'
_snake_case : Tuple = ['This is a simple input 1', 'This is a simple input 2']
_snake_case : List[Any] = tokenizer.bos_token_id
_snake_case : int = tokenizer(UpperCamelCase )
_snake_case : Tuple = tokenizer(UpperCamelCase )
self.assertEqual(out_s.input_ids[0] , UpperCamelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_snake_case : Union[str, Any] = tokenizer.decode(out_s.input_ids )
_snake_case : Any = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , UpperCamelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : Dict = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
_snake_case : Any = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
_snake_case : Optional[Any] = '\nif len_a > len_b: result = a\nelse: result = b'
_snake_case : Optional[int] = tokenizer.encode(UpperCamelCase )
_snake_case : Optional[Any] = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n']
_snake_case : Optional[Any] = tokenizer.decode(UpperCamelCase , truncate_before_pattern=UpperCamelCase )
self.assertEqual(UpperCamelCase , UpperCamelCase )
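        # truncate_before_pattern cuts the decoded text at the earliest (multiline)
        # match of any supplied regex, which is how generated code is stopped at
        # comment markers or string delimiters; e.g. with pattern '^#' (illustrative):
        #   "if x:\n    y()\n# trailing prose"  ->  "if x:\n    y()\n"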
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
| 701 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase_ = """http://www.mocksite.com/file1.txt"""
lowerCAmelCase_ = """\"text\": [\"foo\", \"foo\"]"""
lowerCAmelCase_ = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class _lowerCAmelCase :
'''simple docstring'''
a_ : int =200
a_ : List[str] ={"""Content-Length""": """100"""}
a_ : Tuple ={}
def UpperCamelCase_ ( self : Any , **UpperCamelCase : Any ):
'''simple docstring'''
return [bytes(UpperCamelCase , 'utf-8' )]
def lowerCamelCase_ ( *args: Tuple , **kwargs: Tuple )-> str:
return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict )-> Optional[Any]:
import requests
monkeypatch.setattr(lowerCAmelCase , 'request' , lowerCAmelCase )
_snake_case : List[str] = URL
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[int] = url
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Any = [url]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': url}
_snake_case : int = 'dummy'
_snake_case : Optional[Any] = 'downloads'
_snake_case : Union[str, Any] = tmp_path
_snake_case : Dict = DownloadConfig(
cache_dir=os.path.join(lowerCAmelCase , lowerCAmelCase ) , use_etag=lowerCAmelCase , )
_snake_case : str = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Optional[int] = dl_manager.download(lowerCAmelCase )
_snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = [downloaded_paths]
_snake_case : List[str] = [urls]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in downloaded_paths.keys()
_snake_case : Any = downloaded_paths.values()
_snake_case : List[str] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowerCAmelCase , lowerCAmelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
_snake_case : str = Path(lowerCAmelCase )
_snake_case : int = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
_snake_case : List[str] = downloaded_path.read_text()
assert content == CONTENT
_snake_case : Any = downloaded_path.with_suffix('.json' )
assert metadata_downloaded_path.exists()
_snake_case : Tuple = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: Optional[int] , lowerCAmelCase: Any )-> str:
_snake_case : str = str(lowerCAmelCase )
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : str = filename
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[Any] = [filename]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': filename}
_snake_case : Any = 'dummy'
_snake_case : Union[str, Any] = xz_file.parent
_snake_case : int = 'extracted'
_snake_case : Union[str, Any] = DownloadConfig(
cache_dir=lowerCAmelCase , use_etag=lowerCAmelCase , )
_snake_case : List[str] = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Dict = dl_manager.extract(lowerCAmelCase )
_snake_case : Optional[int] = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[str] = [extracted_paths]
_snake_case : int = [paths]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in extracted_paths.keys()
_snake_case : Optional[int] = extracted_paths.values()
_snake_case : str = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowerCAmelCase , lowerCAmelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_snake_case : List[str] = Path(lowerCAmelCase )
_snake_case : Optional[Any] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowerCAmelCase , etag=lowerCAmelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_snake_case : Optional[int] = extracted_path.read_text()
_snake_case : int = text_file.read_text()
assert extracted_file_content == expected_file_content
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: List[Any] )-> Dict:
assert path.endswith('.jsonl' )
for num_items, line in enumerate(lowerCAmelCase , start=1 ):
_snake_case : Dict = json.loads(line.decode('utf-8' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: List[str] )-> Dict:
_snake_case : List[str] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: int )-> str:
_snake_case : List[Any] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[int] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_tar == 1
assert num_jsonl == 2
def lowerCamelCase_ ( lowerCAmelCase: Any )-> int:
_snake_case : Tuple = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowerCAmelCase ) , start=1 ):
assert os.path.basename(lowerCAmelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 669 | 0 |
def lowerCamelCase_ ( lowerCAmelCase: str )-> bool:
if not all(x.isalpha() for x in string ):
raise ValueError('String must only contain alphabetic characters.' )
_snake_case : Optional[Any] = sorted(string.lower() )
return len(lowerCAmelCase ) == len(set(lowerCAmelCase ) )
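# Illustrative checks (added; not part of the original file). The definition
# above was renamed to lowerCamelCase_, so we call it under that name here;
# the original is presumably is_isogram.
assert lowerCamelCase_('Uncopyrightable' ) is True # every letter occurs once
assert lowerCamelCase_('hello' ) is False # 'l' repeats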
if __name__ == "__main__":
lowerCAmelCase_ = input("""Enter a string """).strip()
lowerCAmelCase_ = is_isogram(input_str)
print(F"""{input_str} is {"an" if isogram else "not an"} isogram.""")
| 702 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : int ="""roberta"""
def __init__( self : int , UpperCamelCase : Tuple=5_02_65 , UpperCamelCase : Any=7_68 , UpperCamelCase : List[Any]=12 , UpperCamelCase : str=12 , UpperCamelCase : Dict=30_72 , UpperCamelCase : Any="gelu" , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : Optional[Any]=5_12 , UpperCamelCase : List[str]=2 , UpperCamelCase : Optional[Any]=0.02 , UpperCamelCase : Tuple=1e-1_2 , UpperCamelCase : str=1 , UpperCamelCase : int=0 , UpperCamelCase : Any=2 , UpperCamelCase : int="absolute" , UpperCamelCase : int=True , UpperCamelCase : List[Any]=None , **UpperCamelCase : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase )
_snake_case : Any = vocab_size
_snake_case : List[str] = hidden_size
_snake_case : List[str] = num_hidden_layers
_snake_case : Dict = num_attention_heads
_snake_case : List[str] = hidden_act
_snake_case : Union[str, Any] = intermediate_size
_snake_case : Union[str, Any] = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : Dict = max_position_embeddings
_snake_case : Optional[int] = type_vocab_size
_snake_case : Tuple = initializer_range
_snake_case : int = layer_norm_eps
_snake_case : Dict = position_embedding_type
_snake_case : Union[str, Any] = use_cache
_snake_case : str = classifier_dropout
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_snake_case : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 669 | 0 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
lowerCAmelCase_ = logging.getLogger(__name__)
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : Optional[int] =field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a_ : bool =field(
default=UpperCAmelCase_ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
a_ : bool =field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
a_ : Optional[int] =field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
a_ : Optional[int] =field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
a_ : Optional[int] =field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
} , )
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : str =field(
default=UpperCAmelCase_ , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
a_ : str =field(
default=UpperCAmelCase_ , metadata={"""help""": """Evaluation language. Also train language if `train_language` is set to None."""} )
a_ : Optional[str] =field(
default=UpperCAmelCase_ , metadata={"""help""": """Train language if it is different from the evaluation language."""} )
a_ : Optional[str] =field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a_ : Optional[str] =field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a_ : Optional[str] =field(
default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
a_ : Optional[bool] =field(
default=UpperCAmelCase_ , metadata={"""help""": """arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"""} , )
a_ : bool =field(
default=UpperCAmelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
a_ : str =field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
a_ : bool =field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
a_ : bool =field(
default=UpperCAmelCase_ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def lowerCamelCase_ ( )-> List[str]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_snake_case : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_snake_case : Optional[int] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_xnli' , lowerCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_snake_case : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase )
datasets.utils.logging.set_verbosity(lowerCAmelCase )
transformers.utils.logging.set_verbosity(lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_snake_case : Optional[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_snake_case : List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
_snake_case : List[Any] = load_dataset(
'xnli' , model_args.language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
_snake_case : Optional[Any] = load_dataset(
'xnli' , model_args.train_language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_snake_case : str = train_dataset.features['label'].names
if training_args.do_eval:
_snake_case : List[str] = load_dataset(
'xnli' , model_args.language , split='validation' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_snake_case : Tuple = eval_dataset.features['label'].names
if training_args.do_predict:
_snake_case : List[str] = load_dataset(
'xnli' , model_args.language , split='test' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
_snake_case : Optional[int] = predict_dataset.features['label'].names
# Labels
_snake_case : Any = len(lowerCAmelCase )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_snake_case : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCAmelCase , idalabel={str(lowerCAmelCase ): label for i, label in enumerate(lowerCAmelCase )} , labelaid={label: i for i, label in enumerate(lowerCAmelCase )} , finetuning_task='xnli' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_snake_case : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_snake_case : str = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
_snake_case : Dict = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_snake_case : Optional[int] = False
def preprocess_function(lowerCAmelCase: Optional[Any] ):
# Tokenize the texts
return tokenizer(
examples['premise'] , examples['hypothesis'] , padding=lowerCAmelCase , max_length=data_args.max_seq_length , truncation=lowerCAmelCase , )
if training_args.do_train:
if data_args.max_train_samples is not None:
_snake_case : Optional[int] = min(len(lowerCAmelCase ) , data_args.max_train_samples )
_snake_case : List[Any] = train_dataset.select(range(lowerCAmelCase ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
_snake_case : str = train_dataset.map(
lowerCAmelCase , batched=lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on train dataset' , )
# Log a few random samples from the training set:
for index in random.sample(range(len(lowerCAmelCase ) ) , 3 ):
logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_snake_case : Optional[int] = min(len(lowerCAmelCase ) , data_args.max_eval_samples )
_snake_case : Union[str, Any] = eval_dataset.select(range(lowerCAmelCase ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
_snake_case : Optional[int] = eval_dataset.map(
lowerCAmelCase , batched=lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on validation dataset' , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
_snake_case : Union[str, Any] = min(len(lowerCAmelCase ) , data_args.max_predict_samples )
_snake_case : Union[str, Any] = predict_dataset.select(range(lowerCAmelCase ) )
with training_args.main_process_first(desc='prediction dataset map pre-processing' ):
_snake_case : int = predict_dataset.map(
lowerCAmelCase , batched=lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on prediction dataset' , )
# Get the metric function
_snake_case : str = evaluate.load('xnli' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCAmelCase: EvalPrediction ):
_snake_case : Dict = p.predictions[0] if isinstance(p.predictions , lowerCAmelCase ) else p.predictions
_snake_case : Any = np.argmax(lowerCAmelCase , axis=1 )
return metric.compute(predictions=lowerCAmelCase , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_snake_case : Tuple = default_data_collator
elif training_args.fpaa:
_snake_case : List[str] = DataCollatorWithPadding(lowerCAmelCase , pad_to_multiple_of=8 )
else:
_snake_case : str = None
# Initialize our Trainer
_snake_case : List[Any] = Trainer(
model=lowerCAmelCase , args=lowerCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCAmelCase , tokenizer=lowerCAmelCase , data_collator=lowerCAmelCase , )
# Training
if training_args.do_train:
_snake_case : Any = None
if training_args.resume_from_checkpoint is not None:
_snake_case : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_snake_case : Tuple = last_checkpoint
_snake_case : str = trainer.train(resume_from_checkpoint=lowerCAmelCase )
_snake_case : Any = train_result.metrics
_snake_case : List[str] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase )
)
_snake_case : List[str] = min(lowerCAmelCase , len(lowerCAmelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , lowerCAmelCase )
trainer.save_metrics('train' , lowerCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_snake_case : Dict = trainer.evaluate(eval_dataset=lowerCAmelCase )
_snake_case : List[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase )
_snake_case : Tuple = min(lowerCAmelCase , len(lowerCAmelCase ) )
trainer.log_metrics('eval' , lowerCAmelCase )
trainer.save_metrics('eval' , lowerCAmelCase )
# Prediction
if training_args.do_predict:
logger.info('*** Predict ***' )
_snake_case : Optional[int] = trainer.predict(lowerCAmelCase , metric_key_prefix='predict' )
_snake_case : Tuple = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(lowerCAmelCase )
)
_snake_case : List[str] = min(lowerCAmelCase , len(lowerCAmelCase ) )
trainer.log_metrics('predict' , lowerCAmelCase )
trainer.save_metrics('predict' , lowerCAmelCase )
_snake_case : List[Any] = np.argmax(lowerCAmelCase , axis=1 )
_snake_case : int = os.path.join(training_args.output_dir , 'predictions.txt' )
if trainer.is_world_process_zero():
with open(lowerCAmelCase , 'w' ) as writer:
writer.write('index\tprediction\n' )
for index, item in enumerate(lowerCAmelCase ):
_snake_case : Union[str, Any] = label_list[item]
writer.write(F"""{index}\t{item}\n""" )
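# Illustrative invocation (script name and values assumed), mirroring the
# usual XNLI fine-tuning recipe:
#   python run_xnli.py --model_name_or_path bert-base-multilingual-cased \
#       --language de --train_language en --do_train --do_eval \
#       --per_device_train_batch_size 32 --max_seq_length 128 \
#       --output_dir /tmp/debug_xnli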
if __name__ == "__main__":
main()
| 703 |
from random import randint, random
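# This module appears to implement the Nagel-Schreckenberg cellular-automaton
# traffic model: the highway is a ring of cells holding each car's speed (-1
# marks an empty cell), and every step each car accelerates, brakes to avoid
# the car ahead, may randomly slow down, then moves forward by its speed.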
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: bool = False , lowerCAmelCase: bool = False , lowerCAmelCase: int = 5 , )-> list:
_snake_case : Dict = [[-1] * number_of_cells] # Create a highway without any cars
_snake_case : List[str] = 0
_snake_case : List[str] = max(lowerCAmelCase , 0 )
while i < number_of_cells:
_snake_case : Optional[Any] = (
randint(0 , lowerCAmelCase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: int )-> int:
_snake_case : Dict = 0
_snake_case : Optional[Any] = highway_now[car_index + 1 :]
for cell in range(len(lowerCAmelCase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(lowerCAmelCase , -1 )
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: float , lowerCAmelCase: int )-> list:
_snake_case : List[Any] = len(lowerCAmelCase )
# Before calculations, the highway is empty
_snake_case : List[Any] = [-1] * number_of_cells
for car_index in range(lowerCAmelCase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
_snake_case : int = min(highway_now[car_index] + 1 , lowerCAmelCase )
# Number of empty cell before the next car
_snake_case : Tuple = get_distance(lowerCAmelCase , lowerCAmelCase ) - 1
# We can't have the car causing an accident
_snake_case : Union[str, Any] = min(next_highway[car_index] , lowerCAmelCase )
if random() < probability:
# Randomly, a driver will slow down
_snake_case : int = max(next_highway[car_index] - 1 , 0 )
return next_highway
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: int , lowerCAmelCase: float , lowerCAmelCase: int )-> list:
_snake_case : Dict = len(highway[0] )
for i in range(lowerCAmelCase ):
_snake_case : Any = update(highway[i] , lowerCAmelCase , lowerCAmelCase )
_snake_case : Tuple = [-1] * number_of_cells
for car_index in range(lowerCAmelCase ):
_snake_case : Union[str, Any] = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
_snake_case : Union[str, Any] = (car_index + speed) % number_of_cells
# Commit the change of position
_snake_case : Tuple = speed
highway.append(lowerCAmelCase )
return highway
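# Shape of the intended end-to-end call (illustrative comment only -- the
# three helpers above were all renamed to lowerCamelCase_, so only the last
# definition survives; construct_highway/simulate are the assumed originals):
#   highway = construct_highway(number_of_cells=30, frequency=4, initial_speed=1)
#   evolution = simulate(highway, number_of_update=10, probability=0.3, max_speed=5)
# Each row of `evolution` is the highway at one time step.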
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 | 0 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase_ ( lowerCAmelCase: Dict , lowerCAmelCase: List[Any] , lowerCAmelCase: str )-> int:
# Initialise PyTorch model
_snake_case : Optional[int] = BertConfig.from_json_file(lowerCAmelCase )
print(F"""Building PyTorch model from configuration: {config}""" )
_snake_case : Optional[int] = BertForPreTraining(lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_bert(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , lowerCAmelCase )
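# Illustrative invocation (script name and paths assumed):
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path bert_model.ckpt \
#       --bert_config_file bert_config.json \
#       --pytorch_dump_path pytorch_model.bin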
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 704 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
lowerCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] =VOCAB_FILES_NAMES
a_ : List[str] =PRETRAINED_VOCAB_FILES_MAP
a_ : str =PRETRAINED_INIT_CONFIGURATION
a_ : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : List[Any] =RealmTokenizer
def __init__( self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Optional[Any]="[UNK]" , UpperCamelCase : Any="[SEP]" , UpperCamelCase : Optional[Any]="[PAD]" , UpperCamelCase : Optional[int]="[CLS]" , UpperCamelCase : Optional[Any]="[MASK]" , UpperCamelCase : Dict=True , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
_snake_case : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
_snake_case : int = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
_snake_case : List[str] = do_lower_case
_snake_case : List[Any] = strip_accents
_snake_case : Dict = tokenize_chinese_chars
_snake_case : Any = normalizer_class(**UpperCamelCase )
_snake_case : Optional[int] = do_lower_case
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = PaddingStrategy.MAX_LENGTH
_snake_case : Any = text
_snake_case : List[str] = kwargs.pop('text_pair' , UpperCamelCase )
_snake_case : int = kwargs.pop('return_tensors' , UpperCamelCase )
_snake_case : Optional[int] = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(UpperCamelCase ):
if batch_text_pair is not None:
_snake_case : List[Any] = batch_text_pair[idx]
else:
_snake_case : Optional[Any] = None
_snake_case : Optional[int] = super().__call__(UpperCamelCase , UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
_snake_case : str = encoded_candidates.get('input_ids' )
_snake_case : Tuple = encoded_candidates.get('attention_mask' )
_snake_case : List[str] = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(UpperCamelCase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(UpperCamelCase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(UpperCamelCase )
_snake_case : str = {key: item for key, item in output_data.items() if len(UpperCamelCase ) != 0}
return BatchEncoding(UpperCamelCase , tensor_type=UpperCamelCase )
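# Illustrative call shape for the candidate-batching method above (its name is
# obfuscated here; in the original it is batch_encode_candidates). Each inner
# list holds the candidate texts for one example, and every candidate is
# padded to max_length so the batch stacks into rectangular tensors:
#   tokenizer.batch_encode_candidates([['candidate 1', 'candidate 2']],
#                                     max_length=10, return_tensors='np')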
def UpperCamelCase_ ( self : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any]=None ):
'''simple docstring'''
_snake_case : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : int = [self.sep_token_id]
_snake_case : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : Optional[Any] = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
| 669 | 0 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Optional[Any] =ConsistencyModelPipeline
a_ : List[str] =UNCONDITIONAL_IMAGE_GENERATION_PARAMS
a_ : Optional[Any] =UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
a_ : Tuple =frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
@property
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Optional[int] = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet' , )
return unet
@property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
return unet
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : Dict=False ):
'''simple docstring'''
if class_cond:
_snake_case : Dict = self.dummy_cond_unet
else:
_snake_case : Any = self.dummy_uncond_unet
# Default to CM multistep sampler
_snake_case : int = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
_snake_case : Optional[int] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def UpperCamelCase_ ( self : int , UpperCamelCase : str , UpperCamelCase : Optional[int]=0 ):
'''simple docstring'''
if str(UpperCamelCase ).startswith('mps' ):
_snake_case : Optional[int] = torch.manual_seed(UpperCamelCase )
else:
_snake_case : Union[str, Any] = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
_snake_case : Tuple = {
'batch_size': 1,
'num_inference_steps': None,
'timesteps': [22, 0],
'generator': generator,
'output_type': 'np',
}
return inputs
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case : List[str] = self.get_dummy_components()
_snake_case : Dict = ConsistencyModelPipeline(**UpperCamelCase )
_snake_case : List[str] = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_snake_case : Dict = self.get_dummy_inputs(UpperCamelCase )
_snake_case : str = pipe(**UpperCamelCase ).images
assert image.shape == (1, 32, 32, 3)
_snake_case : Optional[Any] = image[0, -3:, -3:, -1]
_snake_case : List[str] = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case : List[str] = self.get_dummy_components(class_cond=UpperCamelCase )
_snake_case : Any = ConsistencyModelPipeline(**UpperCamelCase )
_snake_case : Any = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_snake_case : List[Any] = self.get_dummy_inputs(UpperCamelCase )
_snake_case : int = 0
_snake_case : List[str] = pipe(**UpperCamelCase ).images
assert image.shape == (1, 32, 32, 3)
_snake_case : Optional[int] = image[0, -3:, -3:, -1]
_snake_case : Union[str, Any] = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case : Optional[int] = self.get_dummy_components()
_snake_case : int = ConsistencyModelPipeline(**UpperCamelCase )
_snake_case : int = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_snake_case : str = self.get_dummy_inputs(UpperCamelCase )
_snake_case : Tuple = 1
_snake_case : Optional[int] = None
_snake_case : Union[str, Any] = pipe(**UpperCamelCase ).images
assert image.shape == (1, 32, 32, 3)
_snake_case : str = image[0, -3:, -3:, -1]
_snake_case : Tuple = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case : Optional[int] = self.get_dummy_components(class_cond=UpperCamelCase )
_snake_case : Tuple = ConsistencyModelPipeline(**UpperCamelCase )
_snake_case : Union[str, Any] = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_snake_case : Dict = self.get_dummy_inputs(UpperCamelCase )
_snake_case : List[Any] = 1
_snake_case : Optional[Any] = None
_snake_case : Optional[Any] = 0
_snake_case : int = pipe(**UpperCamelCase ).images
assert image.shape == (1, 32, 32, 3)
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : Tuple = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : Tuple=0 , UpperCamelCase : List[Any]=False , UpperCamelCase : Optional[int]="cpu" , UpperCamelCase : List[Any]=torch.floataa , UpperCamelCase : List[Any]=(1, 3, 64, 64) ):
'''simple docstring'''
_snake_case : str = torch.manual_seed(UpperCamelCase )
_snake_case : Tuple = {
'num_inference_steps': None,
'timesteps': [22, 0],
'class_labels': 0,
'generator': generator,
'output_type': 'np',
}
if get_fixed_latents:
_snake_case : Union[str, Any] = self.get_fixed_latents(seed=UpperCamelCase , device=UpperCamelCase , dtype=UpperCamelCase , shape=UpperCamelCase )
_snake_case : Dict = latents
return inputs
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : Union[str, Any]=0 , UpperCamelCase : List[str]="cpu" , UpperCamelCase : Optional[Any]=torch.floataa , UpperCamelCase : Tuple=(1, 3, 64, 64) ):
'''simple docstring'''
if type(UpperCamelCase ) == str:
_snake_case : List[Any] = torch.device(UpperCamelCase )
_snake_case : Any = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
_snake_case : List[str] = randn_tensor(UpperCamelCase , generator=UpperCamelCase , device=UpperCamelCase , dtype=UpperCamelCase )
return latents
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Tuple = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
_snake_case : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
_snake_case : int = ConsistencyModelPipeline(unet=UpperCamelCase , scheduler=UpperCamelCase )
pipe.to(torch_device=UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_snake_case : List[str] = self.get_inputs()
_snake_case : Union[str, Any] = pipe(**UpperCamelCase ).images
assert image.shape == (1, 64, 64, 3)
_snake_case : Dict = image[0, -3:, -3:, -1]
_snake_case : Tuple = np.array([0.08_88, 0.08_81, 0.06_66, 0.04_79, 0.02_92, 0.01_95, 0.02_01, 0.01_63, 0.02_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Tuple = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
_snake_case : Optional[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
_snake_case : int = ConsistencyModelPipeline(unet=UpperCamelCase , scheduler=UpperCamelCase )
pipe.to(torch_device=UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_snake_case : Any = self.get_inputs()
_snake_case : List[str] = 1
_snake_case : Dict = None
_snake_case : List[Any] = pipe(**UpperCamelCase ).images
assert image.shape == (1, 64, 64, 3)
_snake_case : List[Any] = image[0, -3:, -3:, -1]
_snake_case : str = np.array([0.03_40, 0.01_52, 0.00_63, 0.02_67, 0.02_21, 0.01_07, 0.04_16, 0.01_86, 0.02_17] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
@require_torch_a
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : List[str] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
_snake_case : Union[str, Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
_snake_case : int = ConsistencyModelPipeline(unet=UpperCamelCase , scheduler=UpperCamelCase )
pipe.to(torch_device=UpperCamelCase , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_snake_case : Union[str, Any] = self.get_inputs(get_fixed_latents=UpperCamelCase , device=UpperCamelCase )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=UpperCamelCase , enable_math=UpperCamelCase , enable_mem_efficient=UpperCamelCase ):
_snake_case : int = pipe(**UpperCamelCase ).images
assert image.shape == (1, 64, 64, 3)
_snake_case : str = image[0, -3:, -3:, -1]
_snake_case : List[str] = np.array([0.18_75, 0.14_28, 0.12_89, 0.21_51, 0.20_92, 0.14_77, 0.18_77, 0.16_41, 0.13_53] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@require_torch_a
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Any = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
_snake_case : Optional[int] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , )
_snake_case : List[str] = ConsistencyModelPipeline(unet=UpperCamelCase , scheduler=UpperCamelCase )
pipe.to(torch_device=UpperCamelCase , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_snake_case : Tuple = self.get_inputs(get_fixed_latents=UpperCamelCase , device=UpperCamelCase )
_snake_case : Union[str, Any] = 1
_snake_case : Optional[Any] = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=UpperCamelCase , enable_math=UpperCamelCase , enable_mem_efficient=UpperCamelCase ):
_snake_case : Optional[Any] = pipe(**UpperCamelCase ).images
assert image.shape == (1, 64, 64, 3)
_snake_case : Optional[int] = image[0, -3:, -3:, -1]
_snake_case : Optional[Any] = np.array([0.16_63, 0.19_48, 0.22_75, 0.16_80, 0.12_04, 0.12_45, 0.18_58, 0.13_38, 0.20_95] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 705 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict , lowerCAmelCase: Union[str, Any] )-> Optional[int]:
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
_snake_case : Tuple = TOKENIZER_CLASSES
else:
_snake_case : Union[str, Any] = {tokenizer_name: getattr(lowerCAmelCase , tokenizer_name + 'Fast' )}
logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
_snake_case : Dict = TOKENIZER_CLASSES[tokenizer_name]
_snake_case : Optional[Any] = True
if checkpoint_name is None:
_snake_case : Union[str, Any] = list(tokenizer_class.max_model_input_sizes.keys() )
else:
_snake_case : Optional[int] = [checkpoint_name]
logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
_snake_case : str = tokenizer_class.from_pretrained(lowerCAmelCase , force_download=lowerCAmelCase )
# Save fast tokenizer
logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
_snake_case , _snake_case : Tuple = checkpoint.split('/' )
_snake_case : int = os.path.join(lowerCAmelCase , lowerCAmelCase )
elif add_prefix:
_snake_case : Dict = checkpoint
_snake_case : Optional[Any] = dump_path
else:
_snake_case : str = None
_snake_case : Union[str, Any] = dump_path
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
_snake_case : Optional[Any] = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
_snake_case : Optional[int] = file_path.split(lowerCAmelCase )[-1][0]
if next_char == "/":
_snake_case : Union[str, Any] = os.path.join(lowerCAmelCase , lowerCAmelCase )
_snake_case : str = None
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
_snake_case : Optional[int] = tokenizer.save_pretrained(
lowerCAmelCase , legacy_format=lowerCAmelCase , filename_prefix=lowerCAmelCase )
logger.info(F"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(lowerCAmelCase )
logger.info(F"""=> removing {file_name}""" )
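# Illustrative invocation (script name assumed):
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers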
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
lowerCAmelCase_ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 669 | 0 |
import math
def lowerCamelCase_ ( lowerCAmelCase: int )-> list[int]:
_snake_case : str = []
_snake_case : Optional[int] = 2
_snake_case : int = int(math.sqrt(lowerCAmelCase ) ) # Size of every segment
_snake_case : List[Any] = [True] * (end + 1)
_snake_case : Dict = []
while start <= end:
if temp[start] is True:
in_prime.append(lowerCAmelCase )
for i in range(start * start , end + 1 , lowerCAmelCase ):
_snake_case : int = False
start += 1
prime += in_prime
_snake_case : Any = end + 1
_snake_case : int = min(2 * end , lowerCAmelCase )
while low <= n:
_snake_case : List[str] = [True] * (high - low + 1)
for each in in_prime:
_snake_case : List[Any] = math.floor(low / each ) * each
if t < low:
t += each
for j in range(lowerCAmelCase , high + 1 , lowerCAmelCase ):
_snake_case : Tuple = False
for j in range(len(lowerCAmelCase ) ):
if temp[j] is True:
prime.append(j + low )
_snake_case : List[Any] = high + 1
_snake_case : Tuple = min(high + end , lowerCAmelCase )
return prime
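# Quick sanity check (illustrative addition; the definition above was renamed
# to lowerCamelCase_, so it is called under that name):
assert lowerCamelCase_(30 ) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]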
print(sieve(10**6))
| 706 |
def lowerCamelCase_ ( lowerCAmelCase: bytes )-> str:
return "".join([hex(lowerCAmelCase )[2:].zfill(2 ).upper() for byte in list(lowerCAmelCase )] )
def lowerCamelCase_ ( lowerCAmelCase: str )-> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(lowerCAmelCase ) % 2) != 0:
raise ValueError(
'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(lowerCAmelCase ) <= set('0123456789ABCDEF' ):
raise ValueError(
'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(lowerCAmelCase ) , 2 ) )
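# Illustrative round-trip check: '48656C6C6F' is the uppercase hex of b'Hello'.
# Only the decoder is directly callable here, since both helpers were renamed
# to lowerCamelCase_ and the second definition shadows the first.
assert lowerCamelCase_('48656C6C6F' ) == b'Hello'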
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 | 0 |
from functools import lru_cache
@lru_cache
def lowerCamelCase_ ( lowerCAmelCase: int )-> int:
if num < 0:
raise ValueError('Number should not be negative.' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
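# Illustrative checks (added). Only the base cases are directly callable here:
# the definition above was renamed to lowerCamelCase_ while its recursive call
# still targets the original name factorial, so factorial(5) == 120 holds only
# in the un-renamed original.
assert lowerCamelCase_(0 ) == 1
assert lowerCamelCase_(1 ) == 1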
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 |
import csv
import tweepy
# Twitter API credentials
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
def lowerCamelCase_ ( lowerCAmelCase: str )-> None:
# authorize twitter, initialize tweepy
_snake_case : Optional[Any] = tweepy.OAuthHandler(lowerCAmelCase , lowerCAmelCase )
auth.set_access_token(lowerCAmelCase , lowerCAmelCase )
_snake_case : List[Any] = tweepy.API(lowerCAmelCase )
# initialize a list to hold all the tweepy Tweets
_snake_case : Any = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_snake_case : List[str] = api.user_timeline(screen_name=lowerCAmelCase , count=2_00 )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# save the id of the oldest tweet less one
_snake_case : List[Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowerCAmelCase ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
_snake_case : Tuple = api.user_timeline(
screen_name=lowerCAmelCase , count=2_00 , max_id=lowerCAmelCase )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# update the id of the oldest tweet less one
_snake_case : List[str] = alltweets[-1].id - 1
print(F"""...{len(lowerCAmelCase )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
_snake_case : int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""" , 'w' ) as f:
_snake_case : Any = csv.writer(lowerCAmelCase )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(lowerCAmelCase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 669 | 0 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 708 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : Optional[Union[str, Path]] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : bool =True
a_ : Optional[int] =None
a_ : int =1
a_ : Optional[Union[str, bool]] =None
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(UpperCamelCase ) for k, v in self.__dict__.items()} )
| 669 | 0 |
from __future__ import annotations
def lowerCamelCase_ ( lowerCAmelCase: list[int] , lowerCAmelCase: int )-> list[list[int]]:
_snake_case : list[list[int]] = []
_snake_case : list[int] = []
_snake_case : int = 0
_snake_case : Tuple = sum(lowerCAmelCase )
create_state_space_tree(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return result
def lowerCamelCase_ ( lowerCAmelCase: list[int] , lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: list[int] , lowerCAmelCase: list[list[int]] , lowerCAmelCase: int , )-> None:
if sum(lowerCAmelCase ) > max_sum or (remaining_nums_sum + sum(lowerCAmelCase )) < max_sum:
return
if sum(lowerCAmelCase ) == max_sum:
result.append(lowerCAmelCase )
return
for index in range(lowerCAmelCase , len(lowerCAmelCase ) ):
create_state_space_tree(
lowerCAmelCase , lowerCAmelCase , index + 1 , [*path, nums[index]] , lowerCAmelCase , remaining_nums_sum - nums[index] , )
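# Note (added): for the driver below, the only subsets of [3, 34, 4, 12, 5, 2]
# that sum to 9 are [3, 4, 2] and [4, 5]; the driver calls the functions by
# their original names.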
lowerCAmelCase_ = [3, 34, 4, 12, 5, 2]
lowerCAmelCase_ = 9
lowerCAmelCase_ = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 709 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
lowerCAmelCase_ = ["""gpt2"""]
lowerCAmelCase_ = """gpt2"""
if is_tf_available():
class _lowerCAmelCase ( tf.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase : Dict ):
'''simple docstring'''
super().__init__()
_snake_case : Optional[int] = tokenizer
_snake_case : Union[str, Any] = AutoConfig.from_pretrained(UpperCamelCase )
_snake_case : int = TFGPTaLMHeadModel.from_config(UpperCamelCase )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_snake_case : Dict = self.tokenizer(UpperCamelCase )
_snake_case : Union[str, Any] = tokenized['input_ids'].to_tensor()
_snake_case : Any = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
_snake_case : Tuple = self.model(input_ids=UpperCamelCase , attention_mask=UpperCamelCase )['logits']
return outputs
@require_tf
@require_keras_nlp
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
super().setUp()
_snake_case : Optional[int] = [GPTaTokenizer.from_pretrained(UpperCamelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
_snake_case : Tuple = [TFGPTaTokenizer.from_pretrained(UpperCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
_snake_case : Any = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
_snake_case : Tuple = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
_snake_case : Optional[int] = tokenizer([test_inputs] , return_tensors='tf' )
_snake_case : Tuple = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
_snake_case : Dict = python_outputs[key].numpy()
_snake_case : Optional[Any] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(UpperCamelCase , tf.intaa ) == tf_outputs_values ) )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case : str = tf.function(UpperCamelCase )
for test_inputs in self.test_sentences:
_snake_case : int = tf.constant(UpperCamelCase )
_snake_case : Tuple = compiled_tokenizer(UpperCamelCase )
_snake_case : int = tf_tokenizer(UpperCamelCase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case : Union[str, Any] = ModelToSave(tokenizer=UpperCamelCase )
_snake_case : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case : Tuple = model.serving(UpperCamelCase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_snake_case : str = Path(UpperCamelCase ) / 'saved.model'
tf.saved_model.save(UpperCamelCase , UpperCamelCase , signatures={'serving_default': model.serving} )
_snake_case : Optional[int] = tf.saved_model.load(UpperCamelCase )
_snake_case : List[str] = loaded_model.signatures['serving_default'](UpperCamelCase )['output_0']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case : Any = tf_tokenizer(UpperCamelCase ) # Build model with some sample inputs
_snake_case : Optional[Any] = tf_tokenizer.get_config()
_snake_case : Tuple = TFGPTaTokenizer.from_config(UpperCamelCase )
_snake_case : Optional[Any] = model_from_config(UpperCamelCase )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
_snake_case : Union[str, Any] = 12_31_23
for max_length in [3, 5, 10_24]:
_snake_case : Union[str, Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case : List[str] = tf_tokenizer(UpperCamelCase , max_length=UpperCamelCase )
_snake_case : int = out['input_ids'].numpy().shape[1]
assert out_length == max_length
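# A minimal end-to-end sketch of the in-graph tokenizer exercised above, using
# the public (un-mangled) transformers spelling; keras-nlp must be installed,
# since TFGPT2Tokenizer is built on top of it.
import tensorflow as tf
from transformers.models.gpt2 import TFGPT2Tokenizer

tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
# The call compiles to pure TensorFlow ops, so it can be traced into a
# tf.function or exported inside a SavedModel with no Python preprocessing.
encoded = tf_tokenizer(tf.constant(["This is a straightforward English test sentence."]))
print(encoded["input_ids"])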
| 669 | 0 |
class Graph:
    def __init__(self):
        self.vertex = {}

    def print_graph(self):
        print(self.vertex)
        for i in self.vertex:
            print(i, ' -> ', ' -> '.join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int):
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        # visited list for storing the already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list):
        # mark the start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=' ')
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 710 |
def hamming(n_element: int) -> list:
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError('a should be a positive number')
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
| 669 | 0 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCAmelCase_ = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase : int , UpperCamelCase : Union[str, Any]=7 , UpperCamelCase : Union[str, Any]=3 , UpperCamelCase : Optional[Any]=18 , UpperCamelCase : Any=30 , UpperCamelCase : Tuple=4_00 , UpperCamelCase : int=None , UpperCamelCase : int=True , UpperCamelCase : List[Any]=True , UpperCamelCase : Union[str, Any]=None , ):
'''simple docstring'''
_snake_case : Dict = size if size is not None else {'height': 20, 'width': 20}
_snake_case : List[Any] = parent
_snake_case : int = batch_size
_snake_case : int = num_channels
_snake_case : Optional[int] = image_size
_snake_case : Optional[int] = min_resolution
_snake_case : Any = max_resolution
_snake_case : Dict = size
_snake_case : Any = do_normalize
_snake_case : List[Any] = do_convert_rgb
_snake_case : Any = [5_12, 10_24, 20_48, 40_96]
_snake_case : Optional[int] = patch_size if patch_size is not None else {'height': 16, 'width': 16}
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : List[str] = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
_snake_case : Any = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ).convert('RGB' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Dict =PixaStructImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Tuple = PixaStructImageProcessingTester(self )
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_convert_rgb' ) )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : List[str] = self.image_processor_tester.prepare_dummy_image()
_snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
_snake_case : Optional[int] = 20_48
_snake_case : str = image_processor(UpperCamelCase , return_tensors='pt' , max_patches=UpperCamelCase )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.06_06 ) , atol=1e-3 , rtol=1e-3 ) )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
_snake_case : List[str] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_snake_case : str = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_snake_case : Tuple = image_processor(
UpperCamelCase , return_tensors='pt' , max_patches=UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
_snake_case : Optional[Any] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
_snake_case : str = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(UpperCamelCase ):
_snake_case : List[str] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=UpperCamelCase ).flattened_patches
_snake_case : int = 'Hello'
_snake_case : str = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=UpperCamelCase , header_text=UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_snake_case : List[Any] = image_processor(
UpperCamelCase , return_tensors='pt' , max_patches=UpperCamelCase , header_text=UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
_snake_case : Tuple = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_snake_case : Tuple = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_snake_case : Tuple = image_processor(
UpperCamelCase , return_tensors='pt' , max_patches=UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
_snake_case : List[str] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_snake_case : Union[str, Any] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_snake_case : str = image_processor(
UpperCamelCase , return_tensors='pt' , max_patches=UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Tuple =PixaStructImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Any = PixaStructImageProcessingTester(self , num_channels=4 )
_snake_case : Union[str, Any] = 3
@property
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_convert_rgb' ) )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
_snake_case : int = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_snake_case : Dict = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_snake_case : Dict = image_processor(
UpperCamelCase , return_tensors='pt' , max_patches=UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
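# A sketch of the flattened-patch width the assertions above rely on; reading
# the trailing "+ 2" as the patch's row/column position ids follows the
# documented Pix2Struct layout and is assumed here rather than shown in this file.
patch_height, patch_width, channels = 16, 16, 3
expected_hidden_dim = patch_height * patch_width * channels + 2  # 768 pixel values + 2 position ids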
| 711 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type='dataset'), 'r') as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info['name']
        class_names.append(info['name'])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any=7 , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : Dict=30 , UpperCamelCase : int=4_00 , UpperCamelCase : List[str]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : str=True , UpperCamelCase : Any=[0.5, 0.5, 0.5] , UpperCamelCase : int=[0.5, 0.5, 0.5] , UpperCamelCase : Dict=10 , UpperCamelCase : Dict=False , UpperCamelCase : Dict=2_55 , UpperCamelCase : Dict="shi-labs/oneformer_demo" , UpperCamelCase : Optional[int]="ade20k_panoptic.json" , UpperCamelCase : Tuple=10 , ):
'''simple docstring'''
_snake_case : Optional[Any] = parent
_snake_case : Union[str, Any] = batch_size
_snake_case : Tuple = num_channels
_snake_case : List[str] = min_resolution
_snake_case : List[str] = max_resolution
_snake_case : Optional[Any] = do_resize
_snake_case : Optional[Any] = {'shortest_edge': 32, 'longest_edge': 13_33} if size is None else size
_snake_case : Optional[int] = do_normalize
_snake_case : Any = image_mean
_snake_case : List[Any] = image_std
_snake_case : Any = class_info_file
_snake_case : List[str] = prepare_metadata(UpperCamelCase , UpperCamelCase )
_snake_case : Any = num_text
_snake_case : str = repo_path
# for the post_process_functions
_snake_case : Optional[Any] = 2
_snake_case : str = 10
_snake_case : Union[str, Any] = 10
_snake_case : List[Any] = 3
_snake_case : str = 4
_snake_case : List[Any] = num_labels
_snake_case : str = do_reduce_labels
_snake_case : List[str] = ignore_index
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any]=False ):
'''simple docstring'''
if not batched:
_snake_case : Any = image_inputs[0]
if isinstance(UpperCamelCase , Image.Image ):
_snake_case , _snake_case : Any = image.size
else:
_snake_case , _snake_case : Any = image.shape[1], image.shape[2]
if w < h:
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * h / w )
_snake_case : Any = self.size['shortest_edge']
elif w > h:
_snake_case : int = self.size['shortest_edge']
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * w / h )
else:
_snake_case : Dict = self.size['shortest_edge']
_snake_case : Dict = self.size['shortest_edge']
else:
_snake_case : List[Any] = []
for image in image_inputs:
_snake_case , _snake_case : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_snake_case : List[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0]
_snake_case : Optional[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1]
return expected_height, expected_width
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Tuple =OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
a_ : Any =image_processing_class
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = OneFormerImageProcessorTester(self )
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(UpperCamelCase , 'image_std' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'size' ) )
self.assertTrue(hasattr(UpperCamelCase , 'ignore_index' ) )
self.assertTrue(hasattr(UpperCamelCase , 'class_info_file' ) )
self.assertTrue(hasattr(UpperCamelCase , 'num_text' ) )
self.assertTrue(hasattr(UpperCamelCase , 'repo_path' ) )
self.assertTrue(hasattr(UpperCamelCase , 'metadata' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_reduce_labels' ) )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
_snake_case : Optional[Any] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : List[Any] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : int = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
_snake_case : int = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Union[str, Any] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : Optional[int] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
_snake_case : Optional[int] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : List[str] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Tuple=False , UpperCamelCase : str=False , UpperCamelCase : Dict="np" ):
'''simple docstring'''
_snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_snake_case : List[str] = self.image_processing_tester.num_labels
_snake_case : Optional[int] = None
_snake_case : str = None
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
if with_segmentation_maps:
_snake_case : Optional[int] = num_labels
if is_instance_map:
_snake_case : Union[str, Any] = list(range(UpperCamelCase ) ) * 2
_snake_case : Tuple = dict(enumerate(UpperCamelCase ) )
_snake_case : Union[str, Any] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
_snake_case : int = [Image.fromarray(UpperCamelCase ) for annotation in annotations]
_snake_case : List[Any] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , UpperCamelCase , return_tensors='pt' , instance_id_to_semantic_id=UpperCamelCase , pad_and_return_pixel_mask=UpperCamelCase , )
return inputs
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type)
_snake_case : Union[str, Any] = inputs['mask_labels']
_snake_case : Optional[int] = inputs['class_labels']
_snake_case : Optional[int] = inputs['pixel_values']
_snake_case : Optional[Any] = inputs['text_inputs']
# check the batch_size
for mask_label, class_label, text_input in zip(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(UpperCamelCase ) , self.image_processing_tester.num_text )
common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type='pil')  # assumed True/False variants; the mangled source collapsed both booleans
        common(is_instance_map=True, segmentation_type='pil')
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
        fake_binary_mask = np.zeros((20, 50))
        # two runs: (start 21, length 45) spanning rows 0-1, plus one more in row 5
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1
        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
        _snake_case : Any = image_processor.post_process_semantic_segmentation(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
_snake_case : Optional[Any] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        _snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(UpperCamelCase , target_sizes=UpperCamelCase )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Any = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : int = image_processor.post_process_instance_segmentation(UpperCamelCase , threshold=0 )
self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : str = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : Any = image_processor.post_process_panoptic_segmentation(UpperCamelCase , threshold=0 )
self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
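# A self-contained re-implementation sketch of the run-length encoding checked
# in the binary_mask_to_rle test above (1-indexed run starts and lengths over
# the row-major flattened mask); it assumes the same convention as the
# transformers helper rather than reproducing it verbatim.
import numpy as np

def binary_mask_to_rle_sketch(mask: np.ndarray) -> list:
    pixels = np.concatenate([[0], mask.flatten(), [0]])  # pad so every run is closed
    boundaries = np.where(pixels[1:] != pixels[:-1])[0] + 1  # 1-based run edges
    boundaries[1::2] -= boundaries[::2]  # (start, end) -> (start, length)
    return list(boundaries)

demo_mask = np.zeros((20, 50))
demo_mask[0, 20:] = 1
print(binary_mask_to_rle_sketch(demo_mask))  # [21, 30]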
| 669 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[Any] ="""xlm-roberta"""
def __init__( self : str , UpperCamelCase : str=3_05_22 , UpperCamelCase : str=7_68 , UpperCamelCase : List[str]=12 , UpperCamelCase : Tuple=12 , UpperCamelCase : List[str]=30_72 , UpperCamelCase : List[Any]="gelu" , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : int=0.1 , UpperCamelCase : int=5_12 , UpperCamelCase : Any=2 , UpperCamelCase : int=0.02 , UpperCamelCase : Union[str, Any]=1e-1_2 , UpperCamelCase : Optional[int]=1 , UpperCamelCase : Optional[Any]=0 , UpperCamelCase : List[str]=2 , UpperCamelCase : List[str]="absolute" , UpperCamelCase : Dict=True , UpperCamelCase : Union[str, Any]=None , **UpperCamelCase : Optional[int] , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase )
_snake_case : List[Any] = vocab_size
_snake_case : Dict = hidden_size
_snake_case : List[str] = num_hidden_layers
_snake_case : Tuple = num_attention_heads
_snake_case : Union[str, Any] = hidden_act
_snake_case : str = intermediate_size
_snake_case : Dict = hidden_dropout_prob
_snake_case : int = attention_probs_dropout_prob
_snake_case : Optional[Any] = max_position_embeddings
_snake_case : Union[str, Any] = type_vocab_size
_snake_case : Optional[Any] = initializer_range
_snake_case : Any = layer_norm_eps
_snake_case : Union[str, Any] = position_embedding_type
_snake_case : Tuple = use_cache
_snake_case : int = classifier_dropout
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_snake_case : str = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
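# A minimal usage sketch of the ONNX config above, written with the public
# transformers names (XLMRobertaConfig / XLMRobertaOnnxConfig) that the
# mangled identifiers in this file correspond to:
from transformers import XLMRobertaConfig
from transformers.models.xlm_roberta.configuration_xlm_roberta import XLMRobertaOnnxConfig

onnx_config = XLMRobertaOnnxConfig(XLMRobertaConfig(), task="default")
# OrderedDict mapping each input to its dynamic (symbolic) axes,
# e.g. {"input_ids": {0: "batch", 1: "sequence"}, "attention_mask": {...}}
print(onnx_config.inputs)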
| 712 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCAmelCase_ = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def lowerCamelCase_ ( )-> Tuple:
_snake_case : int = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_snake_case : int = get_sagemaker_input()
else:
_snake_case : Any = get_cluster_input()
return config
def lowerCamelCase_ ( lowerCAmelCase: str=None )-> Any:
if subparsers is not None:
_snake_case : List[Any] = subparsers.add_parser('config' , description=lowerCAmelCase )
else:
_snake_case : Dict = argparse.ArgumentParser('Accelerate config command' , description=lowerCAmelCase )
parser.add_argument(
'--config_file' , default=lowerCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase )
return parser
def lowerCamelCase_ ( lowerCAmelCase: Any )-> Any:
_snake_case : Dict = get_user_input()
if args.config_file is not None:
_snake_case : List[str] = args.config_file
else:
if not os.path.isdir(lowerCAmelCase ):
os.makedirs(lowerCAmelCase )
_snake_case : Union[str, Any] = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(lowerCAmelCase )
else:
config.to_yaml_file(lowerCAmelCase )
print(F"""accelerate configuration saved at {config_file}""" )
def lowerCamelCase_ ( )-> Dict:
_snake_case : List[str] = config_command_parser()
_snake_case : str = parser.parse_args()
config_command(lowerCAmelCase )
if __name__ == "__main__":
main()
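# Typical invocation once installed (the standard accelerate CLI entry point):
#   accelerate config --config_file /path/to/default_config.yaml
# The interactive prompts then populate either a cluster or a SageMaker config.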
| 669 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCAmelCase_ = r"""
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `\" / \"`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `\" // \"`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
        Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated
        by [`RagRetriever`].
dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
    dataset_split (`str`, *optional*, defaults to `\"train\"`):
        Which split of the `dataset` to load.
    index_name (`str`, *optional*, defaults to `\"compressed\"`):
        The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and
        `\"compressed\"`.
    index_path (`str`, *optional*):
        The path to the serialized faiss index on disk.
    passages_path (`str`, *optional*):
        A path to text passages compatible with the faiss index. Required if using
        [`~models.rag.retrieval_rag.LegacyIndex`]
    use_dummy_dataset (`bool`, *optional*, defaults to `False`):
Whether to load a \"dummy\" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
"""
@add_start_docstrings(UpperCAmelCase_ )
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Tuple ="""rag"""
a_ : str =True
def __init__( self : Union[str, Any] , UpperCamelCase : int=None , UpperCamelCase : str=True , UpperCamelCase : List[str]=None , UpperCamelCase : List[str]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : Tuple=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : int=" / " , UpperCamelCase : List[str]=" // " , UpperCamelCase : Any=5 , UpperCamelCase : Optional[int]=3_00 , UpperCamelCase : Dict=7_68 , UpperCamelCase : List[Any]=8 , UpperCamelCase : Tuple="wiki_dpr" , UpperCamelCase : List[Any]="train" , UpperCamelCase : Optional[int]="compressed" , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Tuple=False , UpperCamelCase : Tuple=0.0 , UpperCamelCase : str=True , UpperCamelCase : str=False , UpperCamelCase : int=False , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : Optional[Any]=None , **UpperCamelCase : str , ):
'''simple docstring'''
super().__init__(
bos_token_id=UpperCamelCase , pad_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , decoder_start_token_id=UpperCamelCase , forced_eos_token_id=UpperCamelCase , is_encoder_decoder=UpperCamelCase , prefix=UpperCamelCase , vocab_size=UpperCamelCase , **UpperCamelCase , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_snake_case : Tuple = kwargs.pop('question_encoder' )
_snake_case : int = question_encoder_config.pop('model_type' )
_snake_case : Dict = kwargs.pop('generator' )
_snake_case : Optional[Any] = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
_snake_case : Union[str, Any] = AutoConfig.for_model(UpperCamelCase , **UpperCamelCase )
_snake_case : Dict = AutoConfig.for_model(UpperCamelCase , **UpperCamelCase )
_snake_case : Tuple = reduce_loss
_snake_case : Optional[Any] = label_smoothing
_snake_case : List[str] = exclude_bos_score
_snake_case : int = do_marginalize
_snake_case : Any = title_sep
_snake_case : Optional[Any] = doc_sep
_snake_case : Union[str, Any] = n_docs
_snake_case : Dict = max_combined_length
_snake_case : List[str] = dataset
_snake_case : Union[str, Any] = dataset_split
_snake_case : Dict = index_name
_snake_case : Any = retrieval_vector_size
_snake_case : List[str] = retrieval_batch_size
_snake_case : Dict = passages_path
_snake_case : List[Any] = index_path
_snake_case : Tuple = use_dummy_dataset
_snake_case : int = output_retrieved
_snake_case : str = do_deduplication
_snake_case : Union[str, Any] = use_cache
if self.forced_eos_token_id is None:
_snake_case : Dict = getattr(self.generator , 'forced_eos_token_id' , UpperCamelCase )
@classmethod
def UpperCamelCase_ ( cls : Any , UpperCamelCase : PretrainedConfig , UpperCamelCase : PretrainedConfig , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **UpperCamelCase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = copy.deepcopy(self.__dict__ )
_snake_case : Optional[int] = self.question_encoder.to_dict()
_snake_case : int = self.generator.to_dict()
_snake_case : str = self.__class__.model_type
return output
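# A minimal sketch of composing a RAG configuration from its two sub-model
# configs via the classmethod defined above (public transformers spelling:
# RagConfig.from_question_encoder_generator_configs); the DPR/BART checkpoint
# pairing is the usual choice and only illustrative here:
from transformers import AutoConfig, RagConfig

question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator_config = AutoConfig.from_pretrained("facebook/bart-large")
rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config, generator_config, n_docs=5, index_name="compressed"
)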
| 713 |
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(' ', end='')
        for _ in range(0, i + 1):  # printing stars
            print('* ', end='')
        print()


# Function to print lower half of diamond (pyramid)
def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print('* ', end='')
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(' ', end='')


# Function to print the complete diamond pattern
def pretty_print(n):
    if n <= 0:
        print(' ... .... nothing printing :(')
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
| 669 | 0 |
def solution(limit: int = 50000000) -> int:
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 714 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Tuple ="""audio-spectrogram-transformer"""
def __init__( self : List[Any] , UpperCamelCase : Union[str, Any]=7_68 , UpperCamelCase : int=12 , UpperCamelCase : str=12 , UpperCamelCase : Tuple=30_72 , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : Any=0.0 , UpperCamelCase : Dict=0.0 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : Dict=1e-1_2 , UpperCamelCase : str=16 , UpperCamelCase : List[Any]=True , UpperCamelCase : Any=10 , UpperCamelCase : Optional[int]=10 , UpperCamelCase : int=10_24 , UpperCamelCase : Optional[Any]=1_28 , **UpperCamelCase : Optional[Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
_snake_case : Tuple = hidden_size
_snake_case : str = num_hidden_layers
_snake_case : Optional[Any] = num_attention_heads
_snake_case : Optional[Any] = intermediate_size
_snake_case : Optional[Any] = hidden_act
_snake_case : List[str] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Any = initializer_range
_snake_case : List[str] = layer_norm_eps
_snake_case : int = patch_size
_snake_case : List[str] = qkv_bias
_snake_case : int = frequency_stride
_snake_case : List[Any] = time_stride
_snake_case : List[Any] = max_length
_snake_case : List[str] = num_mel_bins
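# A minimal usage sketch with the public transformers names (ASTConfig /
# ASTModel) that the mangled class above corresponds to; the stride and
# spectrogram arguments mirror the fields set in __init__:
from transformers import ASTConfig, ASTModel

config = ASTConfig(frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128)
model = ASTModel(config)  # patch count is derived from the strides and spectrogram size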
| 669 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowerCAmelCase_ = """Create a default config file for Accelerate with only a few flags set."""
def lowerCamelCase_ ( lowerCAmelCase: Dict="no" , lowerCAmelCase: str = default_json_config_file , lowerCAmelCase: bool = False )-> Union[str, Any]:
_snake_case : str = Path(lowerCAmelCase )
path.parent.mkdir(parents=lowerCAmelCase , exist_ok=lowerCAmelCase )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
_snake_case : Any = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
_snake_case : Optional[int] = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
_snake_case : int = torch.cuda.device_count()
_snake_case : Dict = num_gpus
_snake_case : str = False
if num_gpus > 1:
_snake_case : Optional[int] = 'MULTI_GPU'
else:
_snake_case : Dict = 'NO'
elif is_xpu_available() and use_xpu:
_snake_case : Optional[int] = torch.xpu.device_count()
_snake_case : Dict = num_xpus
_snake_case : Optional[int] = False
if num_xpus > 1:
_snake_case : Dict = 'MULTI_XPU'
else:
_snake_case : Optional[Any] = 'NO'
elif is_npu_available():
_snake_case : List[Any] = torch.npu.device_count()
_snake_case : int = num_npus
_snake_case : int = False
if num_npus > 1:
_snake_case : Optional[int] = 'MULTI_NPU'
else:
_snake_case : Tuple = 'NO'
else:
_snake_case : Optional[Any] = 0
_snake_case : Tuple = True
_snake_case : List[Any] = 1
_snake_case : Tuple = 'NO'
_snake_case : List[str] = ClusterConfig(**lowerCAmelCase )
config.to_json_file(lowerCAmelCase )
return path
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] , lowerCAmelCase: Dict )-> List[str]:
_snake_case : Tuple = parser.add_parser('default' , parents=lowerCAmelCase , help=lowerCAmelCase , formatter_class=lowerCAmelCase )
parser.add_argument(
'--config_file' , default=lowerCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
'--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=lowerCAmelCase , help='Whether or not to use mixed precision training. '
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
parser.set_defaults(func=lowerCAmelCase )
return parser
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] )-> Dict:
_snake_case : Any = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F"""accelerate configuration saved at {config_file}""" )
| 715 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score

            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds

            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel('Iterations')
        plt.ylabel('Function values')
        plt.show()

    return best_state
if __name__ == "__main__":
    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        """The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        """The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        """The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
        F"""{local_min.score()}"""
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        """The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
        F"""{local_min.score()}"""
    )
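# Illustration of the Metropolis acceptance rule used above: a move that
# worsens the score by `delta` is still accepted with probability
# e ** (delta / T), so bad moves become rarer as the temperature T cools.
import math

for temperature in (100.0, 10.0, 1.0):
    # chance of accepting a move 5 units worse: ~0.95, ~0.61, ~0.007
    print(temperature, math.e ** (-5.0 / temperature))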
| 669 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""],
"""configuration_data2vec_text""": [
"""DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecTextConfig""",
"""Data2VecTextOnnxConfig""",
],
"""configuration_data2vec_vision""": [
"""DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecVisionConfig""",
"""Data2VecVisionOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dataavec_audio"] = [
"""DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecAudioForAudioFrameClassification""",
"""Data2VecAudioForCTC""",
"""Data2VecAudioForSequenceClassification""",
"""Data2VecAudioForXVector""",
"""Data2VecAudioModel""",
"""Data2VecAudioPreTrainedModel""",
]
    _import_structure["modeling_data2vec_text"] = [
"""DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecTextForCausalLM""",
"""Data2VecTextForMaskedLM""",
"""Data2VecTextForMultipleChoice""",
"""Data2VecTextForQuestionAnswering""",
"""Data2VecTextForSequenceClassification""",
"""Data2VecTextForTokenClassification""",
"""Data2VecTextModel""",
"""Data2VecTextPreTrainedModel""",
]
    _import_structure["modeling_data2vec_vision"] = [
"""DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecVisionForImageClassification""",
"""Data2VecVisionForMaskedImageModeling""",
"""Data2VecVisionForSemanticSegmentation""",
"""Data2VecVisionModel""",
"""Data2VecVisionPreTrainedModel""",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"""TFData2VecVisionForImageClassification""",
"""TFData2VecVisionForSemanticSegmentation""",
"""TFData2VecVisionModel""",
"""TFData2VecVisionPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
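# Usage sketch (assuming this is transformers/models/data2vec/__init__.py):
# `from transformers.models.data2vec import Data2VecTextModel` now resolves through
# the _LazyModule registered above, deferring the heavy torch import until the
# attribute is first accessed.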
| 716 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """Output of PriorTransformer: the predicted CLIP image embedding."""

    predicted_image_embedding: torch.FloatTensor
class PriorTransformer(ModelMixin, ConfigMixin):
'''simple docstring'''
@register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings: Optional[int] = 77,
        additional_embeddings: int = 4,
        dropout: float = 0.0,
        time_embed_act_fn: str = 'silu',
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = 'linear',
        added_emb_type: Optional[str] = 'prd',
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == 'layer':
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f'unsupported embedding_proj_norm_type: {embedding_proj_norm_type}')

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == 'linear':
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f'unsupported encoder_hid_proj_type: {encoder_hid_proj_type}')

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == 'prd':
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn='gelu',
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == 'layer':
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f'Unsupported norm_in_type: {norm_in_type}.')

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer('causal_attention_mask', causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        """Returns all attention processors used in the model, indexed by their weight name."""
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, 'set_processor'):
                processors[f'{name}.processor'] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f'{name}.{sub_name}', child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f'A dict of processors was passed, but the number of processors {len(processor)} does not match the'
                f' number of attention layers: {count}. Please make sure to pass {count} processor classes.'
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, 'set_processor'):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f'{name}.processor'))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f'{name}.{sub_name}', child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        """Disables custom attention processors and restores the default attention implementation."""
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set')

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(additional_embeds, dim=1)

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
    def post_process_latents(self, prior_latents):
        """Un-normalizes the prior latents using the learned CLIP statistics."""
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
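# Shape sanity check (illustrative sketch with default config and random inputs):
# prior = PriorTransformer()
# out = prior(
#     hidden_states=torch.randn(2, 768),
#     timestep=1,
#     proj_embedding=torch.randn(2, 768),
#     encoder_hidden_states=torch.randn(2, 77, 768),  # required: encoder_hid_proj_type defaults to 'linear'
# ).predicted_image_embedding
# assert out.shape == (2, 768)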
| 669 | 0 |
'''Project Euler problem 12: find the first triangle number with over 500 divisors.'''


def count_divisors(n):
    n_divisors = 1
    i = 2

    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1

    if n > 1:
        n_divisors *= 2

    return n_divisors


def solution():
    t_num = 1
    i = 1

    while True:
        i += 1
        t_num += i

        if count_divisors(t_num) > 500:
            break

    return t_num
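# solution() walks the triangle numbers 1, 3, 6, 10, ... and returns the first one
# with more than 500 divisors (the published Project Euler answer is 76576500).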
if __name__ == "__main__":
print(solution())
| 717 |
def catalan_number(number: int) -> int:
    if not isinstance(number, int):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if number < 1:
        msg = f'Input value of [number={number}] must be > 0'
        raise ValueError(msg)
    current_number = 1

    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1

    return current_number
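# Each loop step multiplies by (4*i - 2) / (i + 1), the standard Catalan recurrence, so:
# [catalan_number(i) for i in range(1, 7)] == [1, 1, 2, 5, 14, 42]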
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 | 0 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance

    return shortest_path_distance
graph_fwd = {
"""B""": [["""C""", 1]],
"""C""": [["""D""", 1]],
"""D""": [["""F""", 1]],
"""E""": [["""B""", 1], ["""G""", 2]],
"""F""": [],
"""G""": [["""F""", 1]],
}
graph_bwd = {
"""B""": [["""E""", 1]],
"""C""": [["""B""", 1]],
"""D""": [["""C""", 1]],
"""F""": [["""D""", 1], ["""G""", 1]],
"""E""": [[None, np.inf]],
"""G""": [["""E""", 2]],
}
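# With the sample graphs above, the cheapest route from "E" to "F" is E -> G -> F with cost 3:
# bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3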
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
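# Typical usage (fetches the hosted vocab/tokenizer files on first call):
# tokenizer = LxmertTokenizerFast.from_pretrained('unc-nlp/lxmert-base-uncased')
# tokenizer('hello world')['input_ids']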
| 669 | 0 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if it is shorter than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is already the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is longer than the block size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """A story with no highlights yields an empty summary."""
        raw_story = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story yields empty story and summary lines."""
        raw_story = ''
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            'It was the year of Our Lord one thousand seven hundred and '
            'seventy-five\n\nSpiritual revelations were conceded to England '
            'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            'It was the year of Our Lord one thousand seven hundred and seventy-five.',
            'Spiritual revelations were conceded to England at that favoured period, as at this.',
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ['It was the best of times.']
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 719 |
from __future__ import annotations
from random import random
class Node:
    """Treap's node: holds a value, a random heap priority, and two children."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f'{self.value}: {self.prior:.5}': (self.left, self.right)}, indent=1
            )

    def __str__(self):
        value = str(self.value) + ' '
        left = str(self.left or '')
        right = str(self.right or '')
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=',')
        inorder(root.right)
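# Example (illustrative): build a treap and print its in-order traversal.
# root = None
# for v in [5, 3, 8, 1]:
#     root = insert(root, v)
# inorder(root)  # prints: 1,3,5,8,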
def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == '+':
            root = insert(root, int(arg[1:]))
        elif arg[0] == '-':
            root = erase(root, int(arg[1:]))
        else:
            print('Unknown command')
    return root


def main() -> None:
    root = None
    print(
        'enter numbers to create a tree, + value to add value into treap, '
        "- value to erase all nodes with value. 'q' to quit. "
    )

    args = input()
    while args != 'q':
        root = interact_treap(root, args)
        print(root)
        args = input()

    print('goodbye!')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 669 | 0 |
def hex_to_bin(hex_num: str) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError('No value was passed to the function')

    is_negative = hex_num[0] == '-'
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError('Invalid value was passed to the function')

    bin_str = ''
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    if not bin_str:  # the input was zero
        bin_str = '0'

    return int(('-' + bin_str) if is_negative else bin_str)
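# Sanity checks: hex_to_bin('AC') == 10101100 and hex_to_bin('-fFfF') == -1111111111111111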
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720 |
from functools import reduce
lowerCAmelCase_ = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
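# The reduce builds the product of each 13-digit window as a string; the published
# Project Euler answer for this series is 23514624000.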
if __name__ == "__main__":
print(F"""{solution() = }""")
| 669 | 0 |
def move_tower(height, from_pole, to_pole, with_pole):
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)
def move_disk(from_pole, to_pole):
    print('moving disk from', from_pole, 'to', to_pole)
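# Example: move_tower(2, 'A', 'B', 'C') prints
#   moving disk from A to C
#   moving disk from A to B
#   moving disk from C to B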
def main():
    height = int(input('Height of hanoi: ').strip())
    move_tower(height, 'A', 'B', 'C')
if __name__ == "__main__":
main()
| 721 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
        'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
        'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]['copies'], 2)
        self.assertEqual(duplicate_clusters[0][0]['is_extreme'], True)
| 669 | 0 |