| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |

---
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
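A minimal usage sketch for the helpers above; the device fallback and the dummy 256x256 batch are assumptions for illustration, not part of the original script.

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
vqgan = load_vqgan(device)  # falls back to the default yaml/pt paths above
x = torch.randn(1, 3, 256, 256, device=device)  # hypothetical image batch
xrec = reconstruct_with_vqgan(x, vqgan)
print(xrec.shape)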
---

import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
---

def solution(limit: int = 1_000_000) -> int:
    """Count how many n below the given limit occur exactly ten times."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
---

from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved PyTorch state dict to fp16, in place or at save_path."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
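A hypothetical invocation sketch; the file names are placeholders, not from the original script.

# CLI (via fire):  python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin
# From Python (omitting save_path overwrites the source file):
# convert("pytorch_model.bin")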
---

from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    """Return the parent position of a heap node."""
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    """Return the left-child position of a heap node."""
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    """Return the right-child position of a heap node."""
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    """Minimum priority queue keyed by integer weights."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    """Undirected, weighted graph stored as nested adjacency dictionaries."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    """Run Prim's algorithm over the graph; return the distance and parent maps."""
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
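A small usage example, added for illustration; the expected outputs are worked out by hand from the update rule above.

g = GraphUndirectedWeighted[int]()
g.add_edge(1, 2, 3)
g.add_edge(2, 3, 2)
g.add_edge(1, 3, 8)
dist, parent = prims_algo(g)
print(dist)    # {1: 0, 2: 3, 3: 5}
print(parent)  # {1: None, 2: 1, 3: 2}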
---

from collections.abc import Callable


class Heap:
    """A generic heap; pass a key function to change how items are ordered."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns the parent index of the given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns the left-child index of the given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns the right-child index of the given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs the changes required for swapping two elements in the heap."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using default comparison."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Returns the index of a valid parent as per the desired ordering among
        the given index and both of its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction of the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction of the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value) -> None:
        """Updates the given item's value in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item) -> None:
        """Deletes the given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change - so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        """Returns the top item pair [item, calculated value] if present."""
        return self.arr[0] if self.size else None

    def extract_top(self):
        """Returns and removes the top item pair [item, calculated value] if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    """Placeholder for the module's doctest suite."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
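A short usage example, added for illustration; with the default key the largest stored value surfaces first (pass key=lambda x: -x for min-first ordering).

h = Heap()
h.insert_item(5, 34)
h.insert_item(6, 31)
h.insert_item(7, 37)
print(h.get_top())      # [7, 37]
print(h.extract_top())  # [7, 37]
print(h.get_top())      # [5, 34]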
---
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/data2vec-vision-base-ft': (
'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
---

from __future__ import annotations


def encode(plain: str) -> list[int]:
    """Encode a lowercase string into a list of 1-26 letter positions."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Decode a list of 1-26 letter positions back into a string."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
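A non-interactive round trip of the two helpers, added for illustration:

nums = encode("hello")
print(nums)          # [8, 5, 12, 12, 15]
print(decode(nums))  # hello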
---

def join(separator: str, separated: list) -> str:
    """Concatenate the given strings with the separator between them."""
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
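Example calls, added for illustration:

print(join("", ["a", "b", "c", "d"]))         # abcd
print(join(" ", ["You", "are", "amazing!"]))  # You are amazing!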
---
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
---
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
---

from __future__ import annotations

import numpy as np


def relu(vector: list[float]):
    """Apply ReLU element-wise: max(0, x)."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
---
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
---
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwinForImageClassification',
'SwinForMaskedImageModeling',
'SwinModel',
'SwinPreTrainedModel',
'SwinBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSwinForImageClassification',
'TFSwinForMaskedImageModeling',
'TFSwinModel',
'TFSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
---
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    """Create a simple DataLoader to use during the test cases."""
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)

    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    """Verify the batch sizes coming from a prepared dataloader in each process."""
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state


if __name__ == "__main__":
    main()
---
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_efficientnet": [
"EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientNetConfig",
"EfficientNetOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
"EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientNetForImageClassification",
"EfficientNetModel",
"EfficientNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
---
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
---
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
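A hypothetical command line for this converter; all paths below are placeholders.

# python convert_rembert_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./rembert/model.ckpt \
#     --rembert_config_file ./rembert/config.json \
#     --pytorch_dump_path ./rembert/pytorch_model.bin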
---
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images for testing."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
---
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes


class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 53 | 0 |
import requests

giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of GIF URLs for the given search query from the GIPHY API."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]
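
# Illustrative caveat (not part of the original script): `requests.get(...).json()["data"]`
# assumes a successful response; a robust version would check the status code and handle
# GIPHY rate-limit errors before indexing into "data".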
if __name__ == "__main__":
print('''\n'''.join(get_gifs('''space ship''')))
| 337 |
def bfs(graph, s, t, parent):
    """Return True if there is an augmenting path from source `s` to sink `t`,
    recording the path in `parent` (Edmonds-Karp uses breadth-first search)."""
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]
def ford_fulkerson(graph, source, sink):
    """Compute the maximum flow from `source` to `sink` on a capacity matrix."""
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        # Find the minimum residual capacity along the augmenting path
        path_flow = float("Inf")
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow

        # Update residual capacities of the edges (and reverse edges) along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
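# The capacity matrix above is the classic CLRS max-flow example; the expected output is 23.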
print(ford_fulkerson(graph, source, sink))
| 337 | 1 |
"""Maximum sum of k consecutive elements of an array (sliding window)."""
from __future__ import annotations


def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum over all windows of `k` consecutive elements."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
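
# Worked example: max_sum_in_array([1, 4, 2, 10, 2], k=2) scans the windows
# [1, 4], [4, 2], [2, 10], [10, 2] and returns 12.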
if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1_000, 1_000) for i in range(100)]
    k = randint(0, 110)
    print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array, k)}") | 228 |
"""Median of two arrays."""
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the combined, sorted contents of `nums1` and `nums2`."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
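
# Worked example: median_of_two_arrays([1, 3], [2, 4]) sorts to [1, 2, 3, 4] and returns
# (2 + 3) / 2 = 2.5; with an odd combined length the middle element is returned directly.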
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}") | 228 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
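
    # `attribute_map` lets callers read `config.hidden_size` / `config.num_attention_heads`;
    # `PretrainedConfig` resolves these aliases to `d_model` and `encoder_attention_heads`
    # at attribute access.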
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,  # accepted for API compatibility; not stored as an attribute
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 26 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value = [(True, None)] * 30
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 50 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """
    Prints and saves metrics.

    Args:
        split: one of train, val, test
        metrics: metrics dict
        output_dir: where to save the metrics
    """
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )
    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset
    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 352 |
"""simple docstring"""
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    # Full opacity in the interior, with a linear ramp to zero across the overlap band.
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))
def clamp_rect(rect: [int], min: [int], max: [int]):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )
def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def next_divisible(n, d):
    # Round `n` down to the nearest multiple of `d`.
    divisor = n % d
    return n - divisor
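
# Strategy of the pipeline below: slice the input into `tile_size` x `tile_size` crops padded
# with a `tile_border` overlap, upscale each crop 4x via the parent
# StableDiffusionUpscalePipeline, then paste the results back under a linear-ramp alpha mask
# (see make_transparency_mask above) so neighbouring tiles blend without visible seams.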
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(
        self,
        vae,
        text_encoder,
        tokenizer,
        unet,
        low_res_scheduler,
        scheduler,
        max_noise_level=350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        """Upscale a single tile and paste it into `final_image` with a blending mask."""
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        num_inference_steps=75,
        guidance_scale=9.0,
        noise_level=50,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        callback=None,
        callback_steps=1,  # kept for API symmetry with other pipelines; unused here
        tile_size=128,
        tile_border=32,
        original_image_slice=32,
    ):
        """Upscale `image` 4x by processing it tile by tile."""
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
main()
| 76 | 0 |
"""Compute Proth numbers."""
import math


def proth(number: int) -> int:
    """Return the `number`-th Proth number (the sequence is 1-indexed)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]
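
# A Proth number has the form k * 2**n + 1 with k odd and k < 2**n; the sequence
# begins 3, 5, 9, 13, 17, 25, 33, ...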
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue
        print(f"The {number}th Proth number: {value}")
| 93 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )
    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
| 114 | 0 |
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load pytorch checkpoints in a flax model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename PyTorch weight names to the corresponding Flax names, reshaping the tensor if necessary."""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """Checks whether `key` or `(model_prefix,) + key` is in `random_flax_state_dict`."""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PyTorch stores (out, in, kh, kw); Flax expects (kh, kw, in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: PyTorch stores (out, in); Flax expects (in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    model_prefix = flax_model.base_model_prefix
    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)
    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)
    flax_state_dict = {}
    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))
        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )
        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue
            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
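# --- Illustrative round-trip (an addition, not part of the upstream module) of
# the flatten_dict/unflatten_dict convention the converters rely on:
# nested parameter dicts <-> flat dicts keyed by tuples.
_nested = {"encoder": {"layer_0": {"kernel": 1}}}
_flat = flatten_dict(_nested)
assert _flat == {("encoder", "layer_0", "kernel"): 1}
assert unflatten_dict(_flat) == _nested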
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
        model_prefix = flax_model.base_model_prefix
        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]
            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)
        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))
            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]
            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key
            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )
            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue
                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")
    # import correct flax class (assumes `transformers` is importable here, as in the upstream module)
    import transformers

    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)
    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")
    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )
    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()
    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict
        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple
        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)
        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)
        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key
        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)
    pt_model.load_state_dict(pt_model_dict)
    # re-transform missing_keys to list
    missing_keys = list(missing_keys)
    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )
    return pt_model
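# --- Illustrative sketch (an addition, not part of the upstream module) of the
# 4-D conv kernel layout swap used above: Flax stores conv kernels as
# (kh, kw, in, out), PyTorch as (out, in, kh, kw). Assumes `np` is imported at
# the top of this module, as elsewhere in the file.
_flax_conv_kernel = np.zeros((3, 3, 16, 32))
_pt_conv_weight = np.transpose(_flax_conv_kernel, (3, 2, 0, 1))
assert _pt_conv_weight.shape == (32, 16, 3, 3)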
| 370 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    # Construct the dense (14-atom) <-> sparse (37-atom) index maps and masks.
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device
    )
    protein_aatype = protein["aatype"].to(torch.long)
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask
    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
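# --- Illustrative sketch (an addition, with hypothetical shapes) of how the
# (residx, atom37) -> atom14 index map built above is typically consumed with a
# batched gather over per-residue atom coordinates.
_num_res = 5
_idx = torch.zeros(_num_res, 37, dtype=torch.long)  # stand-in for residx_atom37_to_atom14
_atom14_coords = torch.randn(_num_res, 14, 3)
_atom37_coords = torch.gather(_atom14_coords, 1, _idx[..., None].expand(-1, -1, 3))
assert _atom37_coords.shape == (_num_res, 37, 3)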
| 207 | 0 |
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation (union by rank)
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # collect each undirected edge exactly once, then sort by weight
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
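# --- Example usage (an illustrative addition, not part of the original file):
# build a small weighted triangle and extract its minimum spanning tree.
if __name__ == "__main__":
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 10)
    mst = g.kruskal()
    # Kruskal drops the heavy (1, 3) edge; only the weight-1 and weight-2 edges remain.
    assert 3 not in mst.connections[1]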
| 26 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")
    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)
    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2
    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]
    model = LukeModel(config=config).eval()
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )
    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")
    outputs = model(**encoding)
    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            entity, _ = line.rstrip().split("\t")
            entity_vocab[entity] = index
    return entity_vocab
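# --- Illustrative sketch (an addition, with hypothetical entries) of the TSV
# layout parsed by `load_entity_vocab`: one "<entity>\t<value>" pair per line;
# the mapping uses the line index, not the second column.
import io

_sample = io.StringIO("[PAD]\t0\n[UNK]\t0\n[MASK]\t0\n")
_vocab = {}
for _index, _line in enumerate(_sample):
    _entity, _ = _line.rstrip().split("\t")
    _vocab[_entity] = _index
assert _vocab["[MASK]"] == 2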
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 53 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
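# --- Minimal sketch (an illustrative addition, kept commented out so it does
# not interfere with the module replacement above) of the lazy-import idea
# `_LazyModule` implements: attribute access triggers the real submodule import
# instead of paying the full import cost at package-import time.
#
# import importlib, types
#
# class _LazySketch(types.ModuleType):
#     def __init__(self, name, import_structure):
#         super().__init__(name)
#         self._import_structure = import_structure
#
#     def __getattr__(self, attr):
#         for submodule, symbols in self._import_structure.items():
#             if attr in symbols:
#                 return getattr(importlib.import_module(submodule), attr)
#         raise AttributeError(attr)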
| 360 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        # Tokenize the text and/or preprocess the images, then merge the results.
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forward everything to the tokenizer's batch_decode
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forward everything to the tokenizer's decode
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
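# --- Hedged usage sketch (an illustrative addition; assumes network access and
# the real "openai/clip-vit-base-patch32" checkpoint, plus a PIL `image`):
# from transformers import CLIPProcessor
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# # inputs then carries input_ids, attention_mask and pixel_values together.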
| 26 | 0 |
import os
def solution() -> int:
    # Project Euler 18/67: maximum top-to-bottom path sum in triangle.txt,
    # computed bottom-up by propagating row maxima.
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")
    with open(triangle_path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
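# --- Worked example (an illustrative addition, no triangle.txt needed): on the
# classic 4-row triangle the maximum path 3 -> 7 -> 4 -> 9 sums to 23.
_rows = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
for _i in range(1, len(_rows)):
    for _j in range(len(_rows[_i])):
        _left = _rows[_i - 1][_j - 1] if _j > 0 else 0
        _right = _rows[_i - 1][_j] if _j != len(_rows[_i - 1]) else 0
        _rows[_i][_j] += max(_left, _right)
assert max(_rows[-1]) == 23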
| 228 |
def sum_digits(num: int) -> int:
    # Sum of the decimal digits of `num`.
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    # Project Euler 65: digit sum of the numerator of the max_n-th convergent of
    # the continued fraction of e = [2; 1, 2, 1, 1, 4, 1, 1, 6, 1, ...].
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(F'{solution() = }')
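# --- Quick sanity check (an illustrative addition): the tenth convergent of e
# is 1457/536, so solution(10) should equal sum_digits(1457) = 1 + 4 + 5 + 7.
assert solution(10) == sum_digits(1457) == 17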
| 228 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 272 |
"""simple docstring"""
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [[-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))]
        self.task = defaultdict(list)  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't take this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
| 272 | 1 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict) -> None:
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info: SplitInfo) -> None:
    # the "dataset_name" field is kept in the asdict output for backward compatibility
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 22 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")
    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)
    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight"))
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight"))
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias"))
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean"))
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var"))
# stages
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
))
# 3 convs
for i in range(3):
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
))
# fmt: on
for i in range(config.encoder_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
f"encoder.layers.{i}.self_attn.out_proj.weight",
))
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
f"decoder.layers.{i}.self_attn.out_proj.weight",
))
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias"))
rename_keys.append(
(
f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
))
rename_keys.append(
(
f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
])
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
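# --- Illustrative sketch (an addition; uses the torch import at the top of
# this script): PyTorch's MultiheadAttention packs the q/k/v projections
# row-wise into one (3*d, d) matrix, so slicing thirds recovers the three
# (d, d) projections (d = 256 for DETR).
_d = 256
_in_proj_weight = torch.randn(3 * _d, _d)
_q, _k, _v = _in_proj_weight[:_d], _in_proj_weight[_d : 2 * _d], _in_proj_weight[-_d:]
assert _q.shape == _k.shape == _v.shape == (_d, _d)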
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)
    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
    args = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 76 | 0 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the dataset dict into features ("data") and target.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load the California housing dataset and split it for training/evaluation
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
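# --- Tiny worked example (an illustrative addition) of the two metrics printed above.
_y_true, _y_pred = np.array([3.0, 5.0]), np.array([2.5, 5.5])
assert mean_absolute_error(_y_true, _y_pred) == 0.5  # mean of |0.5| and |-0.5|
assert mean_squared_error(_y_true, _y_pred) == 0.25  # mean of 0.25 and 0.25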
| 61 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
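# --- Hedged usage sketch (an illustrative addition, kept commented out since
# config modules usually carry no executable code): with the defaults above the
# config mirrors the three-stage microsoft/cvt-13 layout.
# config = CvtConfig()
# assert config.embed_dim == [64, 192, 384] and config.depth == [1, 2, 10]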
| 61 | 1 |
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
| 59 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 207 | 0 |
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
def SCREAMING_SNAKE_CASE__ ( self ):
a :Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a :List[Any] = None
if self.use_input_mask:
a :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
a :Tuple = None
a :Union[str, Any] = None
a :List[Any] = None
if self.use_labels:
a :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a :Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a :List[Any] = ids_tensor([self.batch_size] , self.num_choices )
a :List[str] = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
a :str = TFDistilBertModel(config=_lowerCamelCase )
a :Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
a :Optional[Any] = model(_lowerCamelCase )
a :Optional[int] = [input_ids, input_mask]
a :str = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
a :Optional[int] = TFDistilBertForMaskedLM(config=_lowerCamelCase )
a :Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
a :Any = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
a :str = TFDistilBertForQuestionAnswering(config=_lowerCamelCase )
a :str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
a :List[Any] = model(_lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
a :Any = self.num_labels
a :str = TFDistilBertForSequenceClassification(_lowerCamelCase )
a :List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
a :Dict = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
a :int = self.num_choices
a :Any = TFDistilBertForMultipleChoice(_lowerCamelCase )
a :Tuple = tf.tile(tf.expand_dims(_lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
a :Optional[Any] = tf.tile(tf.expand_dims(_lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
a :Optional[int] = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
a :List[Any] = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
a :Any = self.num_labels
a :List[str] = TFDistilBertForTokenClassification(_lowerCamelCase )
a :int = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
a :int = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Dict = self.prepare_config_and_inputs()
((a) , (a) , (a) , (a) , (a) , (a)) :Dict = config_and_inputs
a :Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
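# Each `create_and_check_*` helper above builds one DistilBert task head and
# asserts its output shapes; the test class below wires them into the mixins.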
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
    pipeline_model_mapping = (
{
'feature-extraction': TFDistilBertModel,
'fill-mask': TFDistilBertForMaskedLM,
'question-answering': TFDistilBertForQuestionAnswering,
'text-classification': TFDistilBertForSequenceClassification,
'token-classification': TFDistilBertForTokenClassification,
'zero-shot': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
def SCREAMING_SNAKE_CASE__ ( self ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*_lowerCamelCase )
@slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
@slow
    def test_output_embeds_base_model(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 281 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
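# `_import_structure` maps each submodule to its public names; `_LazyModule`
# defers the real imports until an attribute is first accessed.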
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
'''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VanForImageClassification''',
'''VanModel''',
'''VanPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 281 | 1 |
"""simple docstring"""
import enum
import shutil
import sys
_A , _A = shutil.get_terminal_size()
_A = {"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""}
class _lowerCamelCase ( enum.Enum ):
_lowerCamelCase :Tuple = 0
_lowerCamelCase :Any = 1
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase="" ) -> int:
sys.stdout.write(str(snake_case_ ) + end )
sys.stdout.flush()
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="" ) -> Optional[Any]:
forceWrite(f"""\u001b[{color}m{content}\u001b[0m""" , snake_case_ )
def lowercase_ ( ) -> str:
forceWrite("""\r""" )
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
forceWrite(f"""\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}""" )
def lowercase_ ( ) -> str:
forceWrite(""" """ * TERMINAL_WIDTH )
reset_cursor()
def lowercase_ ( ) -> int:
reset_cursor()
forceWrite("""-""" * TERMINAL_WIDTH )
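# Minimal usage sketch (assumes an ANSI-capable terminal):
#   forceWrite("choose an option")
#   move_cursor(1, "UP")   # move the cursor one line up
#   clear_line()           # blank the current line and return to column 0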
| 242 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
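# Each (parlai_name, hf_name) pair rewrites one ParlAI weight-name fragment
# into its Hugging Face equivalent.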
def rename_state_dict_key(k):
    """Translate a ParlAI weight name into the matching Hugging Face name."""
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Convert a ParlAI Blenderbot checkpoint into the Hugging Face format."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 26 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
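# Note: `accelerator.gather` pads the last batch so every process contributes
# the same number of samples; the slicing inside `evaluation_loop` removes
# those padded duplicates before the metric is computed.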
def evaluation_loop(accelerator: Accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
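# Training entry point: optionally resumes from `--resume_from_checkpoint`,
# verifies the restored accuracy and learning rates against the saved
# `state_<epoch>.json`, then trains and checkpoints once per epoch.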
def training_function(config, args):
SCREAMING_SNAKE_CASE_: int = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE_: Any = config["lr"]
SCREAMING_SNAKE_CASE_: str = int(config["num_epochs"] )
SCREAMING_SNAKE_CASE_: List[Any] = int(config["seed"] )
SCREAMING_SNAKE_CASE_: Any = int(config["batch_size"] )
SCREAMING_SNAKE_CASE_: List[str] = args.model_name_or_path
set_seed(_a )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = get_dataloaders(_a , _a , _a )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE_: Tuple = AutoModelForSequenceClassification.from_pretrained(_a , return_dict=_a )
# Instantiate optimizer
SCREAMING_SNAKE_CASE_: Union[str, Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
SCREAMING_SNAKE_CASE_: List[str] = optimizer_cls(params=model.parameters() , lr=_a )
if accelerator.state.deepspeed_plugin is not None:
SCREAMING_SNAKE_CASE_: Optional[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
SCREAMING_SNAKE_CASE_: int = 1
SCREAMING_SNAKE_CASE_: Optional[int] = (len(_a ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
SCREAMING_SNAKE_CASE_: List[str] = get_linear_schedule_with_warmup(
optimizer=_a , num_warmup_steps=0 , num_training_steps=_a , )
else:
SCREAMING_SNAKE_CASE_: int = DummyScheduler(_a , total_num_steps=_a , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = accelerator.prepare(
_a , _a , _a , _a , _a )
# We need to keep track of how many total steps we have iterated over
SCREAMING_SNAKE_CASE_: List[str] = 0
# We also need to keep track of the stating epoch so files are named properly
SCREAMING_SNAKE_CASE_: str = 0
SCREAMING_SNAKE_CASE_: int = evaluate.load("glue" , "mrpc" )
SCREAMING_SNAKE_CASE_: Tuple = num_epochs
if args.partial_train_epoch is not None:
SCREAMING_SNAKE_CASE_: str = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
SCREAMING_SNAKE_CASE_: Dict = args.resume_from_checkpoint.split("epoch_" )[1]
SCREAMING_SNAKE_CASE_: Optional[int] = ""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
SCREAMING_SNAKE_CASE_: Union[str, Any] = int(_a ) + 1
SCREAMING_SNAKE_CASE_: str = evaluation_loop(_a , _a , _a , _a )
accelerator.print("resumed checkpoint performance:" , _a )
accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] )
accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] )
with open(os.path.join(args.output_dir , f"state_{starting_epoch-1}.json" ) , "r" ) as f:
SCREAMING_SNAKE_CASE_: Dict = json.load(_a )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
SCREAMING_SNAKE_CASE_: List[str] = {}
for epoch in range(_a , _a ):
model.train()
for step, batch in enumerate(_a ):
SCREAMING_SNAKE_CASE_: Optional[int] = model(**_a )
SCREAMING_SNAKE_CASE_: List[str] = outputs.loss
SCREAMING_SNAKE_CASE_: str = loss / gradient_accumulation_steps
accelerator.backward(_a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
SCREAMING_SNAKE_CASE_: Any = f"epoch_{epoch}"
SCREAMING_SNAKE_CASE_: Union[str, Any] = os.path.join(args.output_dir , _a )
accelerator.save_state(_a )
SCREAMING_SNAKE_CASE_: Dict = evaluation_loop(_a , _a , _a , _a )
SCREAMING_SNAKE_CASE_: str = accuracy
SCREAMING_SNAKE_CASE_: Any = lr_scheduler.get_lr()[0]
SCREAMING_SNAKE_CASE_: Tuple = optimizer.param_groups[0]["lr"]
SCREAMING_SNAKE_CASE_: Tuple = epoch
SCREAMING_SNAKE_CASE_: Tuple = overall_step
accelerator.print(f"epoch {epoch}:" , _a )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f"state_{epoch}.json" ) , "w" ) as f:
json.dump(_a , _a )
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
parser.add_argument(
"--model_name_or_path" , type=_a , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=_a , )
parser.add_argument(
"--output_dir" , type=_a , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=_a , default=_a , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--partial_train_epoch" , type=_a , default=_a , help="If passed, the training will stop after this number of epochs." , )
parser.add_argument(
"--num_epochs" , type=_a , default=2 , help="Number of train epochs." , )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 361 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
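# These tests drive `generate` through TextStreamer/TextIteratorStreamer and
# check that the streamed text matches ordinary greedy decoding.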
@require_torch
class StreamerTester(unittest.TestCase):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
SCREAMING_SNAKE_CASE_: str = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = -1
SCREAMING_SNAKE_CASE_: Optional[int] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = tokenizer.decode(greedy_ids[0])
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE_: int = TextStreamer(lowerCAmelCase__)
model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
SCREAMING_SNAKE_CASE_: Union[str, Any] = cs.out[:-1]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
SCREAMING_SNAKE_CASE_: int = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = -1
SCREAMING_SNAKE_CASE_: int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = tokenizer.decode(greedy_ids[0])
SCREAMING_SNAKE_CASE_: int = TextIteratorStreamer(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
SCREAMING_SNAKE_CASE_: Tuple = Thread(target=model.generate , kwargs=lowerCAmelCase__)
thread.start()
SCREAMING_SNAKE_CASE_: Optional[Any] = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: int = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
SCREAMING_SNAKE_CASE_: int = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = -1
SCREAMING_SNAKE_CASE_: Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = greedy_ids[:, input_ids.shape[1] :]
SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer.decode(new_greedy_ids[0])
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE_: Dict = TextStreamer(lowerCAmelCase__ , skip_prompt=lowerCAmelCase__)
model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
SCREAMING_SNAKE_CASE_: Any = cs.out[:-1]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
SCREAMING_SNAKE_CASE_: Tuple = AutoTokenizer.from_pretrained("distilgpt2")
SCREAMING_SNAKE_CASE_: List[str] = AutoModelForCausalLM.from_pretrained("distilgpt2").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = -1
SCREAMING_SNAKE_CASE_: List[str] = torch.ones((1, 5) , device=lowerCAmelCase__).long() * model.config.bos_token_id
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE_: Union[str, Any] = TextStreamer(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__)
model.generate(lowerCAmelCase__ , max_new_tokens=1 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__)
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
SCREAMING_SNAKE_CASE_: str = cs.out[:-1] # Remove the final "\n"
SCREAMING_SNAKE_CASE_: Tuple = tokenizer(lowerCAmelCase__ , return_tensors="pt")
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1))
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
SCREAMING_SNAKE_CASE_: List[str] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = -1
SCREAMING_SNAKE_CASE_: List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = TextIteratorStreamer(lowerCAmelCase__ , timeout=0.001)
SCREAMING_SNAKE_CASE_: Any = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
SCREAMING_SNAKE_CASE_: Optional[Any] = Thread(target=model.generate , kwargs=lowerCAmelCase__)
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: Tuple = ""
for new_text in streamer:
streamer_text += new_text
| 127 | 0 |
'''simple docstring'''
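# A hexagonal number counts the dots in a hexagon with n dots per side:
# h(n) = n * (2n - 1).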
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, starting from h(0) = 0."""
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
| 272 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
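# The tests below check output shapes for PIL, NumPy and PyTorch inputs, both
# batched and unbatched.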
class DPTImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400,
        do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
'''simple docstring'''
    image_processing_class = DPTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image)
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray)
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor)
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 272 | 1 |
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 359 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
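# The dynamic axes above let an exported ONNX graph accept any batch size and
# sequence length (plus a choice dimension for multiple-choice tasks).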
| 100 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
    _import_structure["modeling_data2vec_text"] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
    _import_structure["modeling_data2vec_vision"] = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
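# Under `TYPE_CHECKING` the real imports run so static type checkers see them;
# at runtime the lazy module below serves attributes on demand instead.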
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 61 |
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Optional[Any] = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"] )
UpperCAmelCase_ : Dict = MaskFormerConfig(backbone_config=__lowerCamelCase )
UpperCAmelCase_ : int = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
UpperCAmelCase_ : Dict = 847
UpperCAmelCase_ : str = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
UpperCAmelCase_ : Tuple = 150
UpperCAmelCase_ : int = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
UpperCAmelCase_ : str = 171
UpperCAmelCase_ : Optional[int] = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
UpperCAmelCase_ : int = 133
UpperCAmelCase_ : Tuple = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
UpperCAmelCase_ : List[Any] = 19
UpperCAmelCase_ : Optional[int] = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
UpperCAmelCase_ : Any = 65
UpperCAmelCase_ : Union[str, Any] = "mapillary-vistas-id2label.json"
UpperCAmelCase_ : Any = json.load(open(hf_hub_download(__lowerCamelCase, __lowerCamelCase, repo_type="dataset" ), "r" ) )
UpperCAmelCase_ : int = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
return config
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Dict = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.layers.{i}.downsample.reduction.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3, 0, -1 ), range(0, 3 ) ):
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", f"""mask_embedder.{i}.0.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", f"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
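# Swin stores query/key/value as one fused projection; the helpers below split
# it into the separate q/k/v weights expected by the Hugging Face model.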
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Union[str, Any] = dct.pop(__lowerCamelCase )
UpperCAmelCase_ : str = val
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : int = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
UpperCAmelCase_ : List[Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
UpperCAmelCase_ : Tuple = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
UpperCAmelCase_ : Optional[int] = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ : Tuple = in_proj_weight[:dim, :]
UpperCAmelCase_ : List[Any] = in_proj_bias[: dim]
UpperCAmelCase_ : Any = in_proj_weight[
dim : dim * 2, :
]
UpperCAmelCase_ : Optional[int] = in_proj_bias[
dim : dim * 2
]
UpperCAmelCase_ : Tuple = in_proj_weight[
-dim :, :
]
UpperCAmelCase_ : Tuple = in_proj_bias[-dim :]
# fmt: on
def __a ( __lowerCamelCase, __lowerCamelCase ):
# fmt: off
UpperCAmelCase_ : Dict = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
UpperCAmelCase_ : int = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
UpperCAmelCase_ : int = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ : Any = in_proj_weight[: hidden_size, :]
UpperCAmelCase_ : int = in_proj_bias[:config.hidden_size]
UpperCAmelCase_ : Any = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCAmelCase_ : List[Any] = in_proj_bias[hidden_size : hidden_size * 2]
UpperCAmelCase_ : Dict = in_proj_weight[-hidden_size :, :]
UpperCAmelCase_ : List[Any] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
UpperCAmelCase_ : str = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
UpperCAmelCase_ : Dict = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ : str = in_proj_weight[: hidden_size, :]
UpperCAmelCase_ : Tuple = in_proj_bias[:config.hidden_size]
UpperCAmelCase_ : int = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCAmelCase_ : List[str] = in_proj_bias[hidden_size : hidden_size * 2]
UpperCAmelCase_ : List[Any] = in_proj_weight[-hidden_size :, :]
UpperCAmelCase_ : Optional[Any] = in_proj_bias[-hidden_size :]
# fmt: on
def __a ( ):
UpperCAmelCase_ : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ : Tuple = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw )
return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_maskformer_config(model_name)
    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"""Unexpected keys: {unexpected_keys}"""
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = "ade" in model_name
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)
    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)
    print("Logits:", outputs.class_queries_logits[0, :3, :3])
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]])
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1E-4)
        print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model and image processor to {pytorch_dump_folder_path}""")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"""nielsr/{model_name}""")
        image_processor.push_to_hub(f"""nielsr/{model_name}""")
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
    help="Name of the MaskFormer model you'd like to convert",
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_a = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
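# Example invocation (script name and paths are illustrative, not from the
# original repository):
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade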
| 61 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
def __init__( self , __a , __a=7 , __a=3 , __a=18 , __a=30 , __a=400 , __a=True , __a=None , __a=True , __a=None , __a=True , __a=[0.5, 0.5, 0.5] , __a=[0.5, 0.5, 0.5] , __a=False , ):
'''simple docstring'''
__a : Optional[Any] = size if size is not None else {'height': 20, 'width': 20}
__a : Union[str, Any] = crop_size if crop_size is not None else {'height': 18, 'width': 18}
__a : Tuple = parent
__a : Optional[int] = batch_size
__a : List[str] = num_channels
__a : Dict = image_size
__a : Optional[Any] = min_resolution
__a : Optional[Any] = max_resolution
__a : Optional[Any] = do_resize
__a : Any = size
__a : Union[str, Any] = do_center_crop
__a : str = crop_size
__a : int = do_normalize
__a : Any = image_mean
__a : int = image_std
__a : str = do_reduce_labels
def __UpperCAmelCase ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
    image = Image.open(dataset[0]['file'] )
    map = Image.open(dataset[1]['file'] )
    return image, map
def prepare_semantic_batch_inputs():
    ds = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
    image1 = Image.open(ds[0]['file'] )
    map1 = Image.open(ds[1]['file'] )
    image2 = Image.open(ds[2]['file'] )
    map2 = Image.open(ds[3]['file'] )
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self ):
'''simple docstring'''
        self.image_processor_tester = BeitImageProcessingTester(self )
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , 'do_resize' ) )
self.assertTrue(hasattr(__a , 'size' ) )
self.assertTrue(hasattr(__a , 'do_center_crop' ) )
self.assertTrue(hasattr(__a , 'center_crop' ) )
self.assertTrue(hasattr(__a , 'do_normalize' ) )
self.assertTrue(hasattr(__a , 'image_mean' ) )
self.assertTrue(hasattr(__a , 'image_std' ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 20, 'width': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
self.assertEqual(image_processor.do_reduce_labels , __a )
__a : Optional[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__a )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
self.assertEqual(image_processor.do_reduce_labels , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
__a : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__a : Dict = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
# Test not batched input
__a : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__a : Optional[int] = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a )
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor )
# Test not batched input
__a : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__a : Any = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a )
__a : Tuple = []
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
__a : Any = image_processing(image_inputs[0] , maps[0] , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
# Test batched
__a : int = image_processing(__a , __a , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
# Test not batched input (PIL images)
__a , __a : Union[str, Any] = prepare_semantic_single_inputs()
__a : Any = image_processing(__a , __a , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
# Test batched input (PIL images)
__a , __a : Tuple = prepare_semantic_batch_inputs()
__a : Optional[int] = image_processing(__a , __a , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
2,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
__a , __a : Tuple = prepare_semantic_single_inputs()
__a : List[str] = image_processing(__a , __a , return_tensors='pt' )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 150 )
__a : Union[str, Any] = True
__a : Tuple = image_processing(__a , __a , return_tensors='pt' )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
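# Hedged sketch of what `do_reduce_labels` does to a segmentation map, inferred
# from the min/max assertions above (the usual ADE20k convention): label 0
# (background) becomes the ignore index 255 and every other id shifts down by one.
def _reduce_labels_demo():
    import numpy as np

    label = np.array([[0, 1], [150, 2]], dtype=np.int64)
    reduced = label.copy()
    reduced[reduced == 0] = 255  # background -> ignore index
    reduced = reduced - 1
    reduced[reduced == 254] = 255  # keep the ignore index pinned at 255
    assert reduced.min() >= 0 and reduced.max() <= 255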
| 353 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roc_bert'] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
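# `_LazyModule` defers the heavy imports wired up above until an attribute is
# first accessed. A minimal sketch of the same idea using a PEP 562-style
# module __getattr__ (illustrative only, not the transformers implementation):
def _lazy_module_demo():
    import importlib
    import types

    mod = types.ModuleType("demo_lazy")
    structure = {"json": ["loads", "dumps"]}  # submodule -> exported names

    def _getattr(name):
        for submodule, names in structure.items():
            if name in names:
                return getattr(importlib.import_module(submodule), name)  # imported on first touch
        raise AttributeError(name)

    mod.__getattr__ = _getattr
    assert mod.loads("[1, 2]") == [1, 2]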
| 294 | 0 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'
def __init__( self , _a=None , _a=None , **_a ):
__magic_name__ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , _a , )
__magic_name__ : Union[str, Any] = kwargs.pop("feature_extractor" )
__magic_name__ : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_a , _a )
__magic_name__ : Tuple = self.image_processor
__magic_name__ : Union[str, Any] = False
def __call__( self , *_a , **_a ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_a , **_a )
__magic_name__ : str = kwargs.pop("images" , _a )
__magic_name__ : Dict = kwargs.pop("text" , _a )
if len(_a ) > 0:
__magic_name__ : List[Any] = args[0]
__magic_name__ : Any = args[1:]
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
__magic_name__ : Tuple = self.image_processor(_a , *_a , **_a )
if text is not None:
__magic_name__ : str = self.tokenizer(_a , **_a )
if text is None:
return inputs
elif images is None:
return encodings
else:
            inputs["labels"] = encodings["input_ids"]
return inputs
def SCREAMING_SNAKE_CASE ( self , *_a , **_a ):
return self.tokenizer.batch_decode(*_a , **_a )
def SCREAMING_SNAKE_CASE ( self , *_a , **_a ):
return self.tokenizer.decode(*_a , **_a )
@contextmanager
def SCREAMING_SNAKE_CASE ( self ):
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your images inputs, or in a separate call." )
__magic_name__ : Any = True
__magic_name__ : Dict = self.tokenizer
yield
__magic_name__ : Any = self.image_processor
__magic_name__ : Optional[int] = False
def SCREAMING_SNAKE_CASE ( self , _a , _a=False , _a=None ):
if added_vocab is None:
__magic_name__ : Optional[int] = self.tokenizer.get_added_vocab()
__magic_name__ : int = {}
while tokens:
__magic_name__ : Optional[Any] = re.search(r"<s_(.*?)>" , _a , re.IGNORECASE )
if start_token is None:
break
__magic_name__ : List[str] = start_token.group(1 )
__magic_name__ : Optional[Any] = re.search(rf'''</s_{key}>''' , _a , re.IGNORECASE )
__magic_name__ : Union[str, Any] = start_token.group()
if end_token is None:
__magic_name__ : str = tokens.replace(_a , "" )
else:
__magic_name__ : Optional[int] = end_token.group()
__magic_name__ : Dict = re.escape(_a )
__magic_name__ : List[str] = re.escape(_a )
__magic_name__ : str = re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''' , _a , re.IGNORECASE )
if content is not None:
__magic_name__ : int = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
__magic_name__ : List[str] = self.tokenajson(_a , is_inner_value=_a , added_vocab=_a )
if value:
if len(_a ) == 1:
__magic_name__ : Any = value[0]
__magic_name__ : int = value
else: # leaf nodes
__magic_name__ : str = []
for leaf in content.split(r"<sep/>" ):
__magic_name__ : Optional[Any] = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
__magic_name__ : Optional[Any] = leaf[1:-2] # for categorical special tokens
output[key].append(_a )
if len(output[key] ) == 1:
__magic_name__ : Any = output[key][0]
__magic_name__ : Optional[Any] = tokens[tokens.find(_a ) + len(_a ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=_a , added_vocab=_a )
if len(_a ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def SCREAMING_SNAKE_CASE ( self ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _a , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE ( self ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _a , )
return self.image_processor
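# A minimal sketch of the tag-matching step used by `tokenajson` above; the
# token string is a hypothetical example, and the full method recurses over
# nested tags and <sep/>-separated leaves to build a dict:
def _tag_parse_demo():
    import re

    tokens = "<s_name>burger</s_name>"
    start = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
    key = start.group(1)
    end = re.search(rf"</s_{re.escape(key)}>", tokens, re.IGNORECASE)
    content = re.search(
        f"{re.escape(start.group())}(.*?){re.escape(end.group())}", tokens, re.IGNORECASE
    )
    assert (key, content.group(1)) == ("name", "burger")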
| 281 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _a , _a=True , _a=True , _a=False , _a="[CLS]" , _a="[SEP]" , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , _a = None , **_a , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
__magic_name__ : str = (
AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a )
if isinstance(_a , _a )
else mask_token
)
__magic_name__ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
__magic_name__ : Dict = do_lower_case
__magic_name__ : Tuple = remove_space
__magic_name__ : Union[str, Any] = keep_accents
__magic_name__ : Tuple = vocab_file
__magic_name__ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def SCREAMING_SNAKE_CASE ( self ):
return len(self.sp_model )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[str] = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
__magic_name__ : List[str] = self.__dict__.copy()
__magic_name__ : Any = None
return state
def __setstate__( self , _a ):
__magic_name__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__magic_name__ : str = {}
__magic_name__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self , _a ):
if self.remove_space:
__magic_name__ : List[Any] = " ".join(inputs.strip().split() )
else:
__magic_name__ : str = inputs
__magic_name__ : int = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
__magic_name__ : str = unicodedata.normalize("NFKD" , _a )
__magic_name__ : Tuple = "".join([c for c in outputs if not unicodedata.combining(_a )] )
if self.do_lower_case:
__magic_name__ : int = outputs.lower()
return outputs
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : Optional[Any] = self.preprocess_text(_a )
__magic_name__ : Dict = self.sp_model.encode(_a , out_type=_a )
__magic_name__ : Any = []
for piece in pieces:
if len(_a ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
__magic_name__ : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_a , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__magic_name__ : List[str] = cur_pieces[1:]
else:
__magic_name__ : Optional[int] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_a )
else:
new_pieces.append(_a )
return new_pieces
def SCREAMING_SNAKE_CASE ( self , _a ):
return self.sp_model.PieceToId(_a )
def SCREAMING_SNAKE_CASE ( self , _a ):
return self.sp_model.IdToPiece(_a )
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : Any = []
__magic_name__ : Union[str, Any] = ""
__magic_name__ : int = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
__magic_name__ : List[Any] = True
__magic_name__ : Optional[int] = []
else:
current_sub_tokens.append(_a )
__magic_name__ : Optional[Any] = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
__magic_name__ : List[str] = [self.sep_token_id]
__magic_name__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self , _a , _a = None , _a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is not None:
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1]
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
__magic_name__ : Optional[int] = [self.sep_token_id]
__magic_name__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__magic_name__ : List[str] = os.path.join(
_a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , "wb" ) as fi:
__magic_name__ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
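# Pure-Python sketch of the sequence layout the three methods above implement
# for paired inputs, i.e. [CLS] A [SEP] B [SEP] (token ids are hypothetical):
def _special_tokens_layout_demo():
    cls_id, sep_id = 2, 3
    a, b = [10, 11], [20]
    input_ids = [cls_id] + a + [sep_id] + b + [sep_id]
    token_type_ids = [0] * (len(a) + 2) + [1] * (len(b) + 1)  # segment A then segment B
    special_mask = [1] + [0] * len(a) + [1] + [0] * len(b) + [1]
    assert len(input_ids) == len(token_type_ids) == len(special_mask) == 6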
| 281 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
def lowerCamelCase__( self :Optional[Any] ) -> Tuple:
torch.manual_seed(0 )
        a__ = UNet2DConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=9 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,attention_head_dim=(2, 4) ,use_linear_projection=__snake_case ,)
a__ = PNDMScheduler(skip_prk_steps=__snake_case )
torch.manual_seed(0 )
a__ = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,sample_size=1_28 ,)
torch.manual_seed(0 )
a__ = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,hidden_act='gelu' ,projection_dim=5_12 ,)
a__ = CLIPTextModel(__snake_case )
a__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
a__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowerCamelCase__( self :Optional[int] ,__snake_case :Union[str, Any] ,__snake_case :int=0 ) -> List[Any]:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
a__ = floats_tensor((1, 3, 32, 32) ,rng=random.Random(__snake_case ) ).to(__snake_case )
a__ = image.cpu().permute(0 ,2 ,3 ,1 )[0]
        a__ = Image.fromarray(np.uint8(__snake_case ) ).convert('RGB' ).resize((64, 64) )
        a__ = Image.fromarray(np.uint8(image + 4 ) ).convert('RGB' ).resize((64, 64) )
if str(__snake_case ).startswith('mps' ):
a__ = torch.manual_seed(__snake_case )
else:
a__ = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
a__ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase__( self :int ) -> Union[str, Any]:
a__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
a__ = self.get_dummy_components()
a__ = StableDiffusionInpaintPipeline(**__snake_case )
a__ = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
a__ = self.get_dummy_inputs(__snake_case )
a__ = sd_pipe(**__snake_case ).images
a__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a__ = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__( self :List[Any] ) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
def lowerCamelCase__( self :Dict ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__( self :Union[str, Any] ) -> Optional[int]:
a__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
a__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
a__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy' )
a__ = 'stabilityai/stable-diffusion-2-inpainting'
a__ = StableDiffusionInpaintPipeline.from_pretrained(__snake_case ,safety_checker=__snake_case )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
a__ = 'Face of a yellow cat, high resolution, sitting on a park bench'
a__ = torch.manual_seed(0 )
a__ = pipe(
prompt=__snake_case ,image=__snake_case ,mask_image=__snake_case ,generator=__snake_case ,output_type='np' ,)
a__ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def lowerCamelCase__( self :List[Any] ) -> List[Any]:
a__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
a__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
a__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
a__ = 'stabilityai/stable-diffusion-2-inpainting'
a__ = StableDiffusionInpaintPipeline.from_pretrained(
            __snake_case ,torch_dtype=torch.float16 ,safety_checker=__snake_case ,)
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
a__ = 'Face of a yellow cat, high resolution, sitting on a park bench'
a__ = torch.manual_seed(0 )
a__ = pipe(
prompt=__snake_case ,image=__snake_case ,mask_image=__snake_case ,generator=__snake_case ,output_type='np' ,)
a__ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def lowerCamelCase__( self :List[str] ) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
a__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
a__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
a__ = 'stabilityai/stable-diffusion-2-inpainting'
a__ = PNDMScheduler.from_pretrained(__snake_case ,subfolder='scheduler' )
a__ = StableDiffusionInpaintPipeline.from_pretrained(
            __snake_case ,safety_checker=__snake_case ,scheduler=__snake_case ,torch_dtype=torch.float16 ,)
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
a__ = 'Face of a yellow cat, high resolution, sitting on a park bench'
a__ = torch.manual_seed(0 )
a__ = pipe(
prompt=__snake_case ,image=__snake_case ,mask_image=__snake_case ,generator=__snake_case ,num_inference_steps=2 ,output_type='np' ,)
a__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
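# A hedged distillation of the low-memory recipe exercised by the last test
# above; running it downloads the checkpoint, so it is illustrative only:
def _low_memory_inpaint_demo(prompt, init_image, mask_image):
    pipe = StableDiffusionInpaintPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
    )
    pipe.enable_attention_slicing(1)  # compute attention one slice at a time
    pipe.enable_sequential_cpu_offload()  # stream weights to the GPU on demand
    return pipe(prompt, image=init_image, mask_image=mask_image).images[0]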
| 361 |
def prime_sieve_eratosthenes(num: int):
    if num <= 0:
        raise ValueError('Input must be a positive integer')
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
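# Usage sketch (output checked by hand):
#   prime_sieve_eratosthenes(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
# The sieve runs in O(n log log n) time with one boolean per candidate.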
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case : Optional[Any] = int(input('''Enter a positive integer: ''').strip())
print(prime_sieve_eratosthenes(user_num))
| 109 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
    model_type = "markuplm"
def __init__( self : Any ,A_ : List[Any]=3_0522 ,A_ : Tuple=768 ,A_ : Dict=12 ,A_ : Tuple=12 ,A_ : List[Any]=3072 ,A_ : Dict="gelu" ,A_ : List[str]=0.1 ,A_ : Union[str, Any]=0.1 ,A_ : List[str]=512 ,A_ : Dict=2 ,A_ : Optional[int]=0.02 ,A_ : Optional[Any]=1e-12 ,A_ : List[Any]=0 ,A_ : Optional[int]=0 ,A_ : Union[str, Any]=2 ,A_ : Optional[int]=256 ,A_ : Dict=1024 ,A_ : Optional[Any]=216 ,A_ : str=1001 ,A_ : Any=32 ,A_ : Optional[int]=50 ,A_ : Any="absolute" ,A_ : Optional[int]=True ,A_ : List[Any]=None ,**A_ : int ,) -> Dict:
super().__init__(
pad_token_id=A_ ,bos_token_id=A_ ,eos_token_id=A_ ,**A_ ,)
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_act
A = intermediate_size
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = initializer_range
A = layer_norm_eps
A = position_embedding_type
A = use_cache
A = classifier_dropout
# additional properties
A = max_depth
A = max_xpath_tag_unit_embeddings
A = max_xpath_subs_unit_embeddings
A = tag_pad_id
A = subs_pad_id
        A = xpath_unit_hidden_size
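# Hedged illustration of the xpath fields above: MarkupLM encodes each node's
# XPath as two fixed-length id sequences, one for tag names and one for the
# subscripts, padded with tag_pad_id / subs_pad_id up to max_depth. For a
# hypothetical tag vocabulary {html: 0, body: 1, div: 2}, with subscript 0
# marking an un-indexed node in this sketch:
#   "/html/body/div[2]" -> tags = [0, 1, 2] + [tag_pad_id] * (max_depth - 3)
#                          subs = [0, 0, 2] + [subs_pad_id] * (max_depth - 3)
| 74 |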
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract time info from a single job in a GitHub Actions workflow run."""
    job_info = {}
    start = job['started_at']
    end = job['completed_at']
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info['started_at'] = start
    job_info['completed_at'] = end
    job_info['duration'] = duration_in_min
    return job_info
def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': F'''Bearer {token}'''}
    url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job['name']: extract_time_from_single_job(job) for job in result['jobs']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + F'''&page={i + 2}''', headers=headers).json()
            job_time.update({job['name']: extract_time_from_single_job(job) for job in result['jobs']})
        return job_time
    except Exception:
        print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''')
    return {}
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
_SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
_SCREAMING_SNAKE_CASE : int = get_job_time(args.workflow_run_id)
_SCREAMING_SNAKE_CASE : Dict = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f'''{k}: {v['duration']}''')
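# Pagination sketch: the GitHub API returns at most 100 jobs per call, so for a
# run with `total_count` jobs the loop above fetches ceil((total_count - 100) / 100)
# extra pages starting at &page=2 — e.g. 2 extra pages when total_count is 250.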
| 127 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_autoformer": [
"AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AutoformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"AutoformerForPrediction",
"AutoformerModel",
"AutoformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 351 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 266 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : str = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class __lowerCAmelCase ( __a ):
    model_type = "convbert"
def __init__( self :List[Any] , __magic_name__ :str=3_0522 , __magic_name__ :Optional[int]=768 , __magic_name__ :int=12 , __magic_name__ :Any=12 , __magic_name__ :Union[str, Any]=3072 , __magic_name__ :List[str]="gelu" , __magic_name__ :Optional[int]=0.1 , __magic_name__ :Any=0.1 , __magic_name__ :List[Any]=512 , __magic_name__ :Optional[Any]=2 , __magic_name__ :Dict=0.02 , __magic_name__ :Dict=1E-1_2 , __magic_name__ :Optional[Any]=1 , __magic_name__ :int=0 , __magic_name__ :str=2 , __magic_name__ :Optional[int]=768 , __magic_name__ :Optional[int]=2 , __magic_name__ :int=9 , __magic_name__ :str=1 , __magic_name__ :Any=None , **__magic_name__ :Dict , ):
'''simple docstring'''
super().__init__(
pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = initializer_range
a = layer_norm_eps
a = embedding_size
a = head_ratio
a = conv_kernel_size
a = num_groups
a = classifier_dropout
class __lowerCAmelCase ( __a ):
@property
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
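# Compact demo of the dynamic-axes mapping produced by the property above for
# the default (non multiple-choice) task; axis 0/1 of each input are marked
# variable-sized so one exported ONNX graph serves any batch and sequence length:
def _dynamic_axes_demo():
    dynamic_axis = {0: "batch", 1: "sequence"}
    axes = OrderedDict(
        [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
    )
    assert list(axes) == ["input_ids", "attention_mask", "token_type_ids"]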
| 228 |
"""simple docstring"""
from math import isqrt, log2
def calculate_prime_numbers(max_number: int) -> list[int]:
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
def solution(base: int = 800_800, degree: int = 800_800) -> int:
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
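# Why the two-pointer scan above works (sketch): p**q * q**p <= base**degree
# iff q*log2(p) + p*log2(q) <= degree*log2(base) = upper_bound. The left-hand
# side grows with the larger prime, so once `right` is lowered to the largest
# valid partner for prime_numbers[left], every prime between the two indexes
# pairs validly with it — hence `right - left` pairs per outer step, and
# `right` never has to move back up as `left` advances.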
if __name__ == "__main__":
print(F"""{solution() = }""")
| 100 | 0 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester:
'''simple docstring'''
def __init__( self : Tuple , __snake_case : List[Any] , __snake_case : Optional[Any]=13 , __snake_case : int=7 , __snake_case : List[Any]=True , __snake_case : Optional[Any]=True , __snake_case : Optional[int]=False , __snake_case : int=True , __snake_case : Union[str, Any]=99 , __snake_case : Optional[int]=32 , __snake_case : Union[str, Any]=5 , __snake_case : List[Any]=4 , __snake_case : Union[str, Any]=64 , __snake_case : Dict="gelu" , __snake_case : Any=0.1 , __snake_case : List[Any]=0.1 , __snake_case : int=5_12 , __snake_case : Optional[int]=16 , __snake_case : Optional[Any]=2 , __snake_case : Any=0.02 , __snake_case : List[str]=3 , __snake_case : List[Any]=4 , __snake_case : str=None , __snake_case : Optional[Any]=2 , __snake_case : Optional[int]=2 , __snake_case : Optional[Any]=2 , __snake_case : List[str]=2 , __snake_case : Dict=4 , __snake_case : Any=1 , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = scope
UpperCAmelCase_ = q_groups
UpperCAmelCase_ = k_groups
UpperCAmelCase_ = v_groups
UpperCAmelCase_ = post_attention_groups
UpperCAmelCase_ = intermediate_groups
UpperCAmelCase_ = output_groups
def lowerCamelCase_ ( self : List[str] ):
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self : str ):
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def lowerCamelCase_ ( self : int , __snake_case : Tuple , __snake_case : Tuple , __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : str ):
UpperCAmelCase_ = SqueezeBertModel(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ = model(__snake_case , __snake_case )
UpperCAmelCase_ = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : int , __snake_case : List[str] , __snake_case : str , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Optional[Any] ):
UpperCAmelCase_ = SqueezeBertForMaskedLM(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : Optional[Any] , __snake_case : Optional[Any] , __snake_case : int , __snake_case : List[Any] ):
UpperCAmelCase_ = SqueezeBertForQuestionAnswering(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ = model(
__snake_case , attention_mask=__snake_case , start_positions=__snake_case , end_positions=__snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : Optional[Any] , __snake_case : Any , __snake_case : int , __snake_case : List[Any] , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : List[Any] ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = SqueezeBertForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : int , __snake_case : Optional[int] , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Any , __snake_case : List[Any] ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = SqueezeBertForTokenClassification(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self : List[str] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Tuple , __snake_case : Union[str, Any] ):
UpperCAmelCase_ = self.num_choices
UpperCAmelCase_ = SqueezeBertForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ = model(
__snake_case , attention_mask=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase_ ( self : List[Any] ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) = config_and_inputs
UpperCAmelCase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase : str = False
lowerCAmelCase : Dict = True
lowerCAmelCase : List[Any] = False
def lowerCamelCase_ ( self : Union[str, Any] ):
UpperCAmelCase_ = SqueezeBertModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=__snake_case , dim=37 )
def lowerCamelCase_ ( self : Tuple ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : int ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*__snake_case )
def lowerCamelCase_ ( self : Tuple ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*__snake_case )
def lowerCamelCase_ ( self : Any ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*__snake_case )
def lowerCamelCase_ ( self : Any ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*__snake_case )
def lowerCamelCase_ ( self : Optional[int] ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*__snake_case )
def lowerCamelCase_ ( self : int ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*__snake_case )
@slow
def lowerCamelCase_ ( self : int ):
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = SqueezeBertModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@slow
def lowerCamelCase_ ( self : List[str] ):
UpperCAmelCase_ = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
UpperCAmelCase_ = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]] )
UpperCAmelCase_ = model(__snake_case )[0]
UpperCAmelCase_ = torch.Size((1, 3) )
self.assertEqual(output.shape , __snake_case )
UpperCAmelCase_ = torch.tensor([[0.6_401, -0.0_349, -0.6_041]] )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-4 ) )
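# Shape sketch for the multiple-choice path tested above: a (batch, seq) tensor
# is broadcast to (batch, num_choices, seq) without copying until .contiguous():
def _multiple_choice_shape_demo():
    batch, num_choices, seq = 2, 4, 5
    input_ids = torch.zeros(batch, seq, dtype=torch.long)
    expanded = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
    assert expanded.shape == (batch, num_choices, seq)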
| 177 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    """Weighted undirected graph."""
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]):
        self.vertices = vertices
        self.edges = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }
    def add_edge(self, edge: EdgeT, weight: int):
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight
    def prims_algorithm(self) -> "Graph":
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    script_dir = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    with open(network_file) as f:
        data = f.read().strip().split('\n')
    adjacency_matrix = [line.split(',') for line in data]
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])
    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total
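# Minimal demo of the classes above on a hypothetical 3-vertex triangle with
# edge weights 1, 2 and 3: the minimum spanning tree keeps the two cheapest
# edges, saving weight 3 against the full graph.
def _prims_demo():
    graph = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
    mst = graph.prims_algorithm()
    assert sum(mst.edges.values()) == 3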
if __name__ == "__main__":
print(F"{solution() = }")
| 177 | 1 |
"""simple docstring"""
import os
def solution(filename: str = "matrix.txt") -> int:
    """Return the minimal right/down path sum from the top-left to the
    bottom-right cell of the matrix stored in `filename`."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()
    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]
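# Sanity sketch on a hypothetical 2x2 grid: the cheapest right/down path through
# [[1, 2], [3, 4]] is 1 -> 2 -> 4 = 7, and the dp table fills as [[1, 3], [4, 7]].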
if __name__ == "__main__":
print(F"""{solution() = }""")
| 84 |
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
def __init__( self : Union[str, Any] ) -> int:
super().__init__()
_a : Optional[Any] = nn.Linear(3 , 4 )
        _a : Tuple = nn.BatchNorm1d(4 )
_a : Dict = nn.Linear(4 , 5 )
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : List[str] ) -> int:
return self.lineara(self.batchnorm(self.lineara(UpperCAmelCase__ ) ) )
class UpperCamelCase ( snake_case_ ):
def _lowercase ( self : Any , UpperCAmelCase__ : Any , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Optional[int] ) -> Optional[int]:
return (args[0] + 1,) + args[1:], kwargs
class UpperCamelCase ( snake_case_ ):
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] ) -> List[str]:
return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)

    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))

    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
| 294 | 0 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{bleurt,
  title={BLEURT: Learning Robust Metrics for Text Generation},
  author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
  booktitle={ACL},
  year={2020},
  url={https://arxiv.org/abs/2004.04696}
}
"""

_DESCRIPTION = """\
BLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer
learning, starting from a pretrained BERT model (Devlin et al. 2018), then a second pre-training phase on synthetic
data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune it for your
specific application (the latter is expected to perform better).

See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""

_KWARGS_DESCRIPTION = """
BLEURT score.

Args:
    `predictions` (list of str): prediction/candidate sentences
    `references` (list of str): reference sentences
    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.

Returns:
    'scores': List of scores.
Examples:

    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> bleurt = datasets.load_metric("bleurt")
    >>> results = bleurt.compute(predictions=predictions, references=references)
    >>> print([round(v, 2) for v in results["scores"]])
    [1.03, 1.04]
"""

CHECKPOINT_URLS = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )
    def _download_and_prepare(self, dl_manager):
        # check that the config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
| 194 |
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Return True if n uses each of the digits 1-9 exactly once."""
    s = str(n)
    return len(s) == 9 and set(s) == set("123456789")


def solution() -> int | None:
    """Largest 1-9 pandigital number formed as a concatenated product."""
    # 4-digit bases first: concat(n, 2n) equals n * 100_002 for 4-digit n.
    for base_num in range(9_999, 4_999, -1):
        candidate = 100_002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    # Then 3-digit bases: concat(n, 2n, 3n) equals n * 1_002_003 for 3-digit n.
    for base_num in range(333, 99, -1):
        candidate = 1_002_003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
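

# Illustrative check (added, not part of the original): 9327 concatenated with
# 2 * 9327 = 18654 gives 932718654, which uses each digit 1-9 exactly once.
assert is_9_pandigital(9_327 * 100_002)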
if __name__ == "__main__":
print(F"""{solution() = }""")
| 194 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device("cpu")
def prepare_img():
    """Load a test image of two cats from COCO."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    """First five expected logits for each released SwiftFormer checkpoint."""
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
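

# Illustrative example (added, not in the original script) of how one original
# checkpoint key is rewritten by create_rename_keys:
#
#   >>> create_rename_keys({"network.0.0.dwconv.weight": None})
#   [('network.0.0.dwconv.weight',
#     'swiftformer.encoder.network.0.blocks.0.depth_wise_conv.weight')]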
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
__A = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 10 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/esm2_t6_8M_UR50D": 1_0_2_4,
"facebook/esm2_t12_35M_UR50D": 1_0_2_4,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [line.strip() for line in lines]


class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
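

# Hypothetical usage sketch (added; assumes a local vocab.txt in the ESM
# format, one token per line):
#
#   tokenizer = EsmTokenizer(vocab_file="vocab.txt")
#   tokenizer("MKTAYIAK")["input_ids"]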
| 109 | 0 |
def is_isogram(string: str) -> bool:
    """Return True if no letter occurs more than once (case-insensitive)."""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
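

# Illustrative checks (added, not part of the original module):
assert is_isogram("Uncopyrightable")  # every letter appears exactly once
assert not is_isogram("alphabet")  # the letter "a" repeats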
if __name__ == "__main__":
_A = input("Enter a string ").strip()
_A = is_isogram(input_str)
print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 350 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
_overwrite_items = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
_delete_items = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
_access_absent_items = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
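

# These are pytest-style property tests (added note): run them with, e.g.,
#   pytest data_structures/hashing/tests/test_hash_map.py
# (the path is illustrative; point pytest at wherever this file lives).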
| 137 | 0 |
def prefix_function(input_string: str) -> list[int]:
    """Knuth-Morris-Pratt prefix function: prefix_result[i] is the length of
    the longest proper prefix of input_string[: i + 1] that is also a suffix."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    """Length of the longest border of the whole string."""
    return max(prefix_function(input_str))
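

# Worked example (added for illustration): for "aabcdaabc" the final value is 4
# because the 4-character prefix "aabc" is also a suffix of the whole string.
assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
assert longest_prefix("aabcdaabc") == 4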
if __name__ == "__main__":
import doctest
doctest.testmod()
| 244 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 266 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue backed by a doubly linked list of fixed capacity."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # Close the ring.
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
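

def _demo_queue() -> None:
    """Illustrative round trip (added, not in the original): three enqueues
    followed by three dequeues come back out in FIFO order."""
    queue = CircularQueueLinkedList(initial_capacity=4)
    for item in ("a", "b", "c"):
        queue.enqueue(item)
    assert [queue.dequeue() for _ in range(3)] == ["a", "b", "c"]


_demo_queue()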
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366 |
'''simple docstring'''
def gnome_sort(lst: list) -> list:
    """Sort lst in place using gnome sort and return it."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
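

# Quick checks (added for illustration): gnome sort handles duplicates and
# already-sorted input (O(n) best case, O(n^2) worst case).
assert gnome_sort([5, 3, 3, 1, 4]) == [1, 3, 3, 4, 5]
assert gnome_sort([]) == []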
if __name__ == "__main__":
lowerCAmelCase : Any =input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase : List[str] =[int(item) for item in user_input.split(''',''')]
print(gnome_sort(unsorted))
| 147 | 0 |
"""simple docstring"""
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """0/1 knapsack via plain recursion: at each index, either skip the item
    or take it if it still fits, and keep the better of the two answers."""
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
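

# Worked example (added for illustration): items with (weight, value) pairs
# (1, 3), (4, 5), (3, 4) and capacity 4 -> take items 0 and 2 for value 7.
assert knapsack([1, 4, 3], [3, 5, 4], 3, 4, 0) == 7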
if __name__ == "__main__":
import doctest
doctest.testmod()
| 177 | """simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: best sum over all contiguous subarrays of arr."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or restart it at the current element.
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
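

# Edge-case illustration (added): on an all-negative input the default mode
# returns the largest single element, while allow_empty_subarrays=True lets
# the empty subarray win with a sum of 0.
assert max_subarray_sum([-3, -1, -2]) == -1
assert max_subarray_sum([-3, -1, -2], allow_empty_subarrays=True) == 0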
if __name__ == "__main__":
from doctest import testmod
testmod()
__A = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f'''{max_subarray_sum(nums) = }''')
| 177 | 1 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
                f" {self.sequence_state_dim} and {self.sequence_state_dim}."
            )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_state_dim}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
return asdict(self )
def get_default_vocab_list():
    return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
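

# Minimal usage sketch (added; the argument values are illustrative, not tied
# to a released checkpoint):
#
#   config = EsmConfig(vocab_size=33, mask_token_id=32, pad_token_id=1)
#   config.to_dict()["hidden_size"]  # -> 768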
| 362 |
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    """RegNet embeddings (stem): a single aggressive strided convolution."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    """Projects residual features to the correct size and, if needed,
    downsamples the input using `stride=2`."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    """Squeeze-and-Excitation layer, https://arxiv.org/abs/1709.01507."""

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    """RegNet layer of three convolutions, like a ResNet bottleneck layer with reduction = 1."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    """RegNet Y layer: an X layer with Squeeze-and-Excitation."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    """A RegNet stage composed of stacked layers."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : int = RegNetConfig
lowerCAmelCase : List[Any] = "regnet"
lowerCAmelCase : Optional[int] = "pixel_values"
lowerCAmelCase : Union[str, Any] = True
def UpperCAmelCase ( self : Any ,_snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
        if isinstance(_snake_case ,nn.Conv2d ):
            nn.init.kaiming_normal_(module.weight ,mode='''fan_out''' ,nonlinearity='''relu''' )
        elif isinstance(_snake_case ,(nn.BatchNorm2d, nn.GroupNorm) ):
nn.init.constant_(module.weight ,1 )
nn.init.constant_(module.bias ,0 )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Dict ,_snake_case : Any=False ) -> Optional[int]:
"""simple docstring"""
if isinstance(_snake_case ,_snake_case ):
lowercase__ : str = value
lowerCAmelCase_ = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCAmelCase_ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." ,A_ ,)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __A ( A_ ):
'''simple docstring'''
def __init__( self : Optional[Any] ,_snake_case : Any ) -> Tuple:
"""simple docstring"""
super().__init__(_snake_case )
lowercase__ : Any = config
lowercase__ : List[str] = RegNetEmbeddings(_snake_case )
lowercase__ : Any = RegNetEncoder(_snake_case )
        lowercase__ : Dict = nn.AdaptiveAvgPool2d((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=_snake_case ,config_class=_CONFIG_FOR_DOC ,modality='''vision''' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def UpperCAmelCase ( self : Dict ,_snake_case : Tensor ,_snake_case : Optional[bool] = None ,_snake_case : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
lowercase__ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ : Dict = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : Union[str, Any] = self.embedder(_snake_case )
lowercase__ : List[Any] = self.encoder(
_snake_case ,output_hidden_states=_snake_case ,return_dict=_snake_case )
lowercase__ : str = encoder_outputs[0]
lowercase__ : Optional[int] = self.pooler(_snake_case )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_snake_case ,pooler_output=_snake_case ,hidden_states=encoder_outputs.hidden_states ,)
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,A_ ,)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __A ( A_ ):
'''simple docstring'''
def __init__( self : int ,_snake_case : Tuple ) -> Any:
"""simple docstring"""
super().__init__(_snake_case )
lowercase__ : Optional[Any] = config.num_labels
lowercase__ : int = RegNetModel(_snake_case )
# classification head
lowercase__ : str = nn.Sequential(
nn.Flatten() ,nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity() ,)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=_snake_case ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def UpperCAmelCase ( self : List[Any] ,_snake_case : Optional[torch.FloatTensor] = None ,_snake_case : Optional[torch.LongTensor] = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[bool] = None ,) -> ImageClassifierOutputWithNoAttention:
"""simple docstring"""
lowercase__ : Any = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : List[Any] = self.regnet(_snake_case ,output_hidden_states=_snake_case ,return_dict=_snake_case )
lowercase__ : List[str] = outputs.pooler_output if return_dict else outputs[1]
lowercase__ : Union[str, Any] = self.classifier(_snake_case )
lowercase__ : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase__ : List[Any] = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase__ : Dict = '''single_label_classification'''
else:
lowercase__ : Optional[int] = '''multi_label_classification'''
if self.config.problem_type == "regression":
lowercase__ : Union[str, Any] = MSELoss()
if self.num_labels == 1:
lowercase__ : List[Any] = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
lowercase__ : Tuple = loss_fct(_snake_case ,_snake_case )
elif self.config.problem_type == "single_label_classification":
lowercase__ : Tuple = CrossEntropyLoss()
lowercase__ : str = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase__ : Any = BCEWithLogitsLoss()
lowercase__ : Union[str, Any] = loss_fct(_snake_case ,_snake_case )
if not return_dict:
lowercase__ : Tuple = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_snake_case ,logits=_snake_case ,hidden_states=outputs.hidden_states )
| 302 | 0 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
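
# A minimal, illustrative sketch of the helper exercised above; `get_activation`
# and the "gelu" key are taken directly from the imports and tests in this file.
if __name__ == "__main__":
    act = get_activation("gelu")
    print(act(torch.tensor([-1.0, 0.0, 1.0])))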
| 194 |
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """Build a (ksize x ksize) Gabor kernel with the given orientation and scale."""
    # The kernel size has to be odd.
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # Fill in each value of the kernel.
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # rotate the coordinates: get kernel x and kernel y
            _x = cos_theta * px + sin_theta * py
            _y = -sin_theta * px + cos_theta * py

            # fill kernel with the Gabor function value
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
| 194 | 1 |
'''simple docstring'''
def bfs(graph, source, sink, parent):
    """Breadth-first search; fills `parent` and reports whether sink is reachable."""
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[sink]


def ford_fulkerson(graph, source, sink):
    """Compute the maximum flow from source to sink on a capacity matrix."""
    # Filled in by BFS; stores the augmenting path found in each iteration.
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum residual capacity along the selected path.
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
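
# Hedged sanity check: this capacity matrix is the classic textbook example whose
# maximum flow is 23. A fresh copy is used because ford_fulkerson mutates its input.
fresh = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert ford_fulkerson(fresh, 0, 5) == 23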
| 142 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def __a ( _UpperCamelCase: Tuple , _UpperCamelCase: Optional[int] , _UpperCamelCase: Optional[int]=None , **_UpperCamelCase: Any ) -> Optional[Any]:
"""simple docstring"""
_snake_case = [x.strip() for x in open(_UpperCamelCase ).readlines()]
_snake_case = [x.strip() for x in open(_UpperCamelCase ).readlines()][: len(_UpperCamelCase )]
_snake_case = calculate_rouge(_UpperCamelCase , _UpperCamelCase , **_UpperCamelCase )
if save_path is not None:
save_json(_UpperCamelCase , _UpperCamelCase , indent=_UpperCamelCase )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 142 | 1 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """Variance-preserving (VP) SDE scheduler (Song et al., 2021)."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, t, x, generator=None):
        if self.timesteps is None:
            raise ValueError(
                """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""")

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
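
# Illustrative sampling-loop sketch for the scheduler above. The import path
# mirrors the diffusers package, and the zero score is a stand-in for a real
# score network, so this is an assumption-laden example, not part of this file.
if __name__ == "__main__":
    scheduler = ScoreSdeVpScheduler()
    scheduler.set_timesteps(num_inference_steps=10)

    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        score = torch.zeros_like(sample)  # stand-in for score_model(sample, t)
        sample, sample_mean = scheduler.step_pred(score, t, sample)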
| 41 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
            F'''{test_file} instead.''')
    test_fn = components[-1]
    if not test_fn.endswith('py'):
        raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''')
    if not test_fn.startswith('test_modeling_'):
        raise ValueError(
            F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''')
    components = components[:-1] + [test_fn.replace('.py', '')]
    test_module_path = '.'.join(components)
    return test_module_path


def get_test_module(test_file):
    """Get the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Get all classes in a model test module whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith('ModelTester'):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all test classes in a model test module that declare model classes."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, 'all_model_classes', [])
        if len(model_classes) > 0:
            test_classes.append(test_class)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` of the test classes."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class."""
    test = test_class()
    if hasattr(test, 'setUp'):
        test.setUp()
    model_tester = None
    if hasattr(test, 'model_tester'):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in `test_file` that have `model_class` in their `all_model_classes`."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes in `test_file` that are associated with `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Get a mapping from test classes to model tester classes."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Get a mapping from model classes to test classes."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to model tester classes."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the information succinct and JSON-serializable."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
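
# Hypothetical usage of the helpers above; the test file path is a placeholder
# and assumes this module runs from the root of a transformers checkout.
if __name__ == "__main__":
    test_file = "tests/models/bert/test_modeling_bert.py"
    print(to_json(get_test_classes(test_file)))
    print(to_json(get_model_to_tester_mapping(test_file)))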
| 137 | 0 |
"""simple docstring"""
def merge_sort(collection: list) -> list:
    """Sort a list with top-down merge sort and return a new sorted list.

    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                # Pop the smaller head element first to keep the output ordered.
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(*merge_sort(unsorted), sep=',')
| 371 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(F"""Load fine-pruned model from {model_name_or_path}""")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(F"""Copied layer {name}""")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(F"""Copied layer {name}""")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(F"""Copied layer {name}""")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(F"""Pruned layer {name}""")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F"""{prefix_}mask_scores"""]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(F"""Pruned layer {name}""")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F"""{prefix_}mask_scores"""]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(F"""Pruned layer {name}""")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F"""{prefix_}mask_scores"""]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(F"""Pruned layer {name}""")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), F"""bertarized_{os.path.basename(model_name_or_path)}""")

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(F"""\nCreated folder {target_model_path}""")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
    main(args)
| 284 | 0 |
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, 'utils', 'tf_ops', 'onnx.json')) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, 'rb') as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(
            F"""Found the following incompatible ops for the opset {opset}:\n""" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(F"""Found the following incompatible ops for the opset {opset}:""")
        print(*incompatible_ops, sep='\n')
    else:
        print(F"""The saved model {saved_model_path} can properly be converted with ONNX.""")
if __name__ == "__main__":
lowercase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=12, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
lowercase__ : Union[str, Any] = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
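
# Hypothetical invocation (script name and model path are placeholders):
#   python utils/check_tf_ops.py --saved_model_path ./saved_model/model.pb --opset 12 --strict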
| 324 |
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue of fixed capacity, backed by a doubly linked list."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # Close the ring: the last node points back to the first.
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        """Return, without removing, the element at the front of the queue."""
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("""Empty Queue""")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("""Full Queue""")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
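
    # Illustrative usage of the fixed-capacity circular queue above; values are arbitrary.
    queue = CircularQueueLinkedList(initial_capacity=3)
    queue.enqueue("a")
    queue.enqueue("b")
    print(queue.first())    # -> a
    print(queue.dequeue())  # -> a
    print(queue.dequeue())  # -> b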
| 147 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : int = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ ='''blenderbot-small'''
SCREAMING_SNAKE_CASE_ =['''past_key_values''']
SCREAMING_SNAKE_CASE_ ={'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : int , snake_case__ : str=5_0_2_6_5 , snake_case__ : int=5_1_2 , snake_case__ : Optional[int]=8 , snake_case__ : int=2_0_4_8 , snake_case__ : List[str]=1_6 , snake_case__ : Optional[int]=8 , snake_case__ : Optional[int]=2_0_4_8 , snake_case__ : int=1_6 , snake_case__ : Tuple=0.0 , snake_case__ : List[Any]=0.0 , snake_case__ : List[str]=True , snake_case__ : Union[str, Any]=True , snake_case__ : List[Any]="gelu" , snake_case__ : Any=5_1_2 , snake_case__ : List[Any]=0.1 , snake_case__ : int=0.0 , snake_case__ : Dict=0.0 , snake_case__ : List[str]=0.02 , snake_case__ : Optional[int]=1 , snake_case__ : Union[str, Any]=False , snake_case__ : Optional[Any]=0 , snake_case__ : Union[str, Any]=1 , snake_case__ : str=2 , snake_case__ : List[str]=2 , **snake_case__ : int , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = vocab_size
UpperCAmelCase__ : Union[str, Any] = max_position_embeddings
UpperCAmelCase__ : Optional[Any] = d_model
UpperCAmelCase__ : Any = encoder_ffn_dim
UpperCAmelCase__ : int = encoder_layers
UpperCAmelCase__ : Dict = encoder_attention_heads
UpperCAmelCase__ : Optional[Any] = decoder_ffn_dim
UpperCAmelCase__ : Dict = decoder_layers
UpperCAmelCase__ : Any = decoder_attention_heads
UpperCAmelCase__ : Optional[int] = dropout
UpperCAmelCase__ : int = attention_dropout
UpperCAmelCase__ : str = activation_dropout
UpperCAmelCase__ : List[Any] = activation_function
UpperCAmelCase__ : List[Any] = init_std
UpperCAmelCase__ : Any = encoder_layerdrop
UpperCAmelCase__ : Tuple = decoder_layerdrop
UpperCAmelCase__ : Any = use_cache
UpperCAmelCase__ : List[Any] = encoder_layers
UpperCAmelCase__ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , decoder_start_token_id=snake_case__ , forced_eos_token_id=snake_case__ , **snake_case__ , )
class lowerCAmelCase__ ( __magic_name__ ):
@property
def __a ( self : Dict ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase__ : Dict = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
UpperCAmelCase__ : Any = {0: "batch"}
UpperCAmelCase__ : int = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
UpperCAmelCase__ : str = {0: "batch", 1: "decoder_sequence"}
UpperCAmelCase__ : Tuple = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(snake_case__ , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCAmelCase__ : Any = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.num_layers
for i in range(snake_case__ ):
UpperCAmelCase__ : Tuple = {0: "batch", 2: "past_sequence + sequence"}
UpperCAmelCase__ : List[Any] = {0: "batch", 2: "past_sequence + sequence"}
else:
UpperCAmelCase__ : Tuple = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def __a ( self : Optional[int] ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase__ : List[str] = super().outputs
else:
UpperCAmelCase__ : Tuple = super(snake_case__ , self ).outputs
if self.use_past:
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.num_layers
for i in range(snake_case__ ):
UpperCAmelCase__ : List[str] = {0: "batch", 2: "past_sequence + sequence"}
UpperCAmelCase__ : Optional[int] = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def __a ( self : str , snake_case__ : PreTrainedTokenizer , snake_case__ : int = -1 , snake_case__ : int = -1 , snake_case__ : bool = False , snake_case__ : Optional[TensorType] = None , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Generate decoder inputs
UpperCAmelCase__ : Dict = seq_length if not self.use_past else 1
UpperCAmelCase__ : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
UpperCAmelCase__ : Optional[int] = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
UpperCAmelCase__ : str = dict(**snake_case__ , **snake_case__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = common_inputs["input_ids"].shape
UpperCAmelCase__ : Dict = common_inputs["decoder_input_ids"].shape[1]
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.num_attention_heads
UpperCAmelCase__ : Optional[int] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase__ : int = decoder_seq_length + 3
UpperCAmelCase__ : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCAmelCase__ : Union[str, Any] = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(snake_case__ , snake_case__ )] , dim=1 )
UpperCAmelCase__ : Optional[int] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.num_layers
UpperCAmelCase__ : Union[str, Any] = min(snake_case__ , snake_case__ )
UpperCAmelCase__ : int = max(snake_case__ , snake_case__ ) - min_num_layers
UpperCAmelCase__ : Tuple = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(snake_case__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(snake_case__ ),
torch.zeros(snake_case__ ),
torch.zeros(snake_case__ ),
torch.zeros(snake_case__ ),
) )
# TODO: test this.
UpperCAmelCase__ : str = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(snake_case__ , snake_case__ ):
common_inputs["past_key_values"].append((torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) )
return common_inputs
def __a ( self : List[Any] , snake_case__ : PreTrainedTokenizer , snake_case__ : int = -1 , snake_case__ : int = -1 , snake_case__ : bool = False , snake_case__ : Optional[TensorType] = None , ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
UpperCAmelCase__ : List[str] = seqlen + 2
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.num_layers
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.num_attention_heads
UpperCAmelCase__ : List[str] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase__ : str = common_inputs["attention_mask"].dtype
UpperCAmelCase__ : Any = torch.cat(
[common_inputs["attention_mask"], torch.ones(snake_case__ , snake_case__ , dtype=snake_case__ )] , dim=1 )
UpperCAmelCase__ : List[str] = [
(torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) for _ in range(snake_case__ )
]
return common_inputs
def __a ( self : Optional[int] , snake_case__ : PreTrainedTokenizer , snake_case__ : int = -1 , snake_case__ : int = -1 , snake_case__ : bool = False , snake_case__ : Optional[TensorType] = None , ):
'''simple docstring'''
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase__ : Optional[int] = compute_effective_axis_dimension(
snake_case__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase__ : List[Any] = tokenizer.num_special_tokens_to_add(snake_case__ )
UpperCAmelCase__ : Any = compute_effective_axis_dimension(
snake_case__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case__ )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase__ : List[str] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
UpperCAmelCase__ : List[str] = dict(tokenizer(snake_case__ , return_tensors=snake_case__ ) )
return common_inputs
def __a ( self : Optional[int] , snake_case__ : PreTrainedTokenizer , snake_case__ : int = -1 , snake_case__ : int = -1 , snake_case__ : bool = False , snake_case__ : Optional[TensorType] = None , ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ )
elif self.task == "causal-lm":
UpperCAmelCase__ : Any = self._generate_dummy_inputs_for_causal_lm(
snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ )
else:
UpperCAmelCase__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ )
return common_inputs
def __a ( self : Optional[Any] , snake_case__ : str , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : Optional[int] ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase__ : List[str] = super()._flatten_past_key_values_(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
UpperCAmelCase__ : List[str] = super(snake_case__ , self )._flatten_past_key_values_(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
| 298 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__ :
def __init__( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict=1_3 , snake_case__ : List[str]=7 , snake_case__ : Union[str, Any]=True , snake_case__ : Tuple=True , snake_case__ : Optional[int]=True , snake_case__ : Any=True , snake_case__ : Any=9_9 , snake_case__ : List[Any]=1_6 , snake_case__ : Any=3_6 , snake_case__ : Union[str, Any]=6 , snake_case__ : Tuple=6 , snake_case__ : List[str]=6 , snake_case__ : List[str]=3_7 , snake_case__ : Dict="gelu" , snake_case__ : int=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : List[str]=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Optional[Any]=0.02 , snake_case__ : List[str]=3 , snake_case__ : Any=4 , snake_case__ : int=None , ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = parent
UpperCAmelCase__ : int = batch_size
UpperCAmelCase__ : int = seq_length
UpperCAmelCase__ : List[str] = is_training
UpperCAmelCase__ : Union[str, Any] = use_input_mask
UpperCAmelCase__ : Optional[Any] = use_token_type_ids
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : List[Any] = vocab_size
UpperCAmelCase__ : Any = embedding_size
UpperCAmelCase__ : List[str] = hidden_size
UpperCAmelCase__ : List[Any] = num_hidden_layers
UpperCAmelCase__ : int = num_hidden_groups
UpperCAmelCase__ : Union[str, Any] = num_attention_heads
UpperCAmelCase__ : List[str] = intermediate_size
UpperCAmelCase__ : Optional[Any] = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Tuple = attention_probs_dropout_prob
UpperCAmelCase__ : str = max_position_embeddings
UpperCAmelCase__ : Any = type_vocab_size
UpperCAmelCase__ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : Tuple = num_labels
UpperCAmelCase__ : List[str] = num_choices
UpperCAmelCase__ : Union[str, Any] = scope
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Optional[int] = None
if self.use_input_mask:
UpperCAmelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : Optional[int] = None
if self.use_token_type_ids:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Any = None
if self.use_labels:
UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self : Any ):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def __a ( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : str = AlbertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
UpperCAmelCase__ : Optional[Any] = model(snake_case__ , token_type_ids=snake_case__ )
UpperCAmelCase__ : Optional[int] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : int ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AlbertForPreTraining(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Dict = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , sentence_order_label=snake_case__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def __a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = AlbertForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AlbertForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : List[str] = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self : Dict , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.num_labels
UpperCAmelCase__ : int = AlbertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self : str , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : str = self.num_labels
UpperCAmelCase__ : Any = AlbertForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : List[str] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self : Any , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.num_choices
UpperCAmelCase__ : Optional[Any] = AlbertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Tuple = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ =(
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ =True
def __a ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Optional[int]=False ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class in get_values(snake_case__ ):
UpperCAmelCase__ : List[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case__ )
UpperCAmelCase__ : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
return inputs_dict
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = AlbertModelTester(self )
UpperCAmelCase__ : Any = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def __a ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def __a ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase__ : Dict = type
self.model_tester.create_and_check_model(*snake_case__ )
@slow
def __a ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained("albert-base-v2" )
UpperCAmelCase__ : Dict = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
UpperCAmelCase__ : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ )[0]
UpperCAmelCase__ : Dict = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , snake_case__ )
UpperCAmelCase__ : Union[str, Any] = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case__ , atol=1e-4 ) )
| 298 | 1 |
__lowerCamelCase : Tuple = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__lowerCamelCase : Optional[int] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__lowerCamelCase : Union[str, Any] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 18 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowercase , """width_multiplier""" ) )
class MobileViTVaModelTester :
    def __init__( self , parent , batch_size=13 , image_size=64 , patch_size=2 , num_channels=3 , hidden_act="swish" , conv_kernel_size=3 , output_stride=32 , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=10 , scope=None , width_multiplier=0.25 , ffn_dropout=0.0 , attn_dropout=0.0 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier , divisor=8 )
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self ):
        '''simple docstring'''
        return MobileViTVaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout , attn_dropout=self.attn_dropout , )
    def create_and_check_model( self , config , pixel_values , labels , pixel_labels ):
        '''simple docstring'''
        model = MobileViTVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        result = model(pixel_values , labels=pixel_labels )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
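# Hedged aside (added illustration only): assuming the imported `make_divisible`
# follows the usual MobileNet rounding helper, the default tester configuration
# yields a last hidden size of make_divisible(512 * 0.25, divisor=8) == 128 and
# a final feature-map side of image_size // output_stride == 64 // 32 == 2.
def _demo_default_tester_dims():
    return make_divisible(512 * 0.25 , divisor=8 ), 64 // 32  # expected: (128, 2)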
@require_torch
class MobileViTVaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': MobileViTVaModel,
'image-classification': MobileViTVaForImageClassification,
'image-segmentation': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowerCamelCase : Dict =False
__lowerCamelCase : Optional[Any] =False
__lowerCamelCase : int =False
__lowerCamelCase : Any =False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = MobileViTVaModelTester(self )
        self.config_tester = MobileViTVaConfigTester(self , config_class=MobileViTVaConfig , has_text_modality=False )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
    def test_model_common_attributes( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileViTV2 does not output attentions""" )
    def test_attention_outputs( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
    def test_multi_gpu_data_parallel_forward( self ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
    def test_forward_signature( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states ) , expected_num_stages )
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_for_semantic_segmentation( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    """simple docstring"""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
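# Hedged usage sketch (added illustration; the checkpoint name is taken from
# the tests below, the rest is a plain inference recipe):
def _demo_classify_image():
    processor = MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
    model = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
    inputs = processor(images=prepare_img() , return_tensors="""pt""" )
    with torch.no_grad():
        logits = model(**inputs ).logits  # shape (1, 1000)
    return logits.argmax(-1 ).item()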
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
        '''simple docstring'''
        return (
            MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
            if is_vision_available()
            else None
        )
@slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@slow
    def test_inference_semantic_segmentation( self ):
        '''simple docstring'''
        model = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
    def test_post_processing_semantic_segmentation( self ):
        '''simple docstring'''
        model = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(50, 60)] )
        expected_shape = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , expected_shape )
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , expected_shape )
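# Hedged aside (added illustration): `post_process_semantic_segmentation` returns
# one (H, W) tensor of class ids per image; the palette below is an arbitrary
# assumption used only to visualize such a map, not part of the MobileViTV2 API.
def _demo_segmentation_to_rgb(segmentation_map ):
    import numpy as np

    palette = np.random.RandomState(0 ).randint(0 , 255 , size=(21, 3) , dtype=np.uint8 )
    return palette[segmentation_map.numpy()]  # (H, W, 3) uint8 color image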
| 302 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ : str = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = 'bert'
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig( OnnxConfig ):
"""simple docstring"""
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ] )
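# Hedged usage sketch (added illustration, not part of the original file):
def _demo_bert_onnx_inputs():
    config = BertConfig(vocab_size=1000 , hidden_size=128 , num_hidden_layers=2 , num_attention_heads=2 )
    onnx_config = BertOnnxConfig(config )
    # for non multiple-choice tasks every input is dynamic over (batch, sequence)
    return list(onnx_config.inputs )  # ["input_ids", "attention_mask", "token_type_ids"]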
| 287 |
'''simple docstring'''
import math
def malus_law( initial_intensity : float, angle : float ) -> float:
    """simple docstring"""
    if initial_intensity < 0:
        raise ValueError('''The value of intensity cannot be negative''' )
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' )
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
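    # Hedged worked example (added illustration): at 60 degrees the transmitted
    # intensity is cos^2(60 deg) = 0.25 of the incident intensity.
    assert abs(malus_law(100.0, 60.0) - 25.0) < 1e-9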
| 287 | 1 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def layer_name_mapping( key , file ):
    """simple docstring"""
    layer_rename_map = {
        '''word_embeddings.weight''': '''word_embeddings.weight''',
        '''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
        '''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
        '''weight''': '''ln_f.weight''',
        '''bias''': '''ln_f.bias''',
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks
    layer_number = int(re.match(R'''.*layer_(\d*).*''' , file )[1] )
    layer_number -= 3
    return f"h.{layer_number}." + key
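# Hedged worked example (added illustration): layer files are offset by the 3
# leading non-block files, so "layer_04" maps to transformer block "h.1".
assert layer_name_mapping("""input_layernorm.weight""" , """layer_04-model_00-model_states.pt""" ) == """h.1.input_layernorm.weight"""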
def get_dtype_size( dtype ):
    """simple docstring"""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(R'''[^\d](\d+)$''' , str(dtype ) )
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}." )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
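# Hedged worked examples (added illustration): the byte size is derived from
# the trailing bit width in the dtype's repr.
assert get_dtype_size(torch.float32 ) == 4  # "torch.float32" -> 32 bits -> 4 bytes
assert get_dtype_size(torch.bool ) == 1 / 8  # packed bool special case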
def convert_bloom_checkpoint_to_pytorch( bloom_checkpoint_path , bloom_config_file , pytorch_dump_folder_path , shard_model , pretraining_tp ):
"""simple docstring"""
# Construct model
if bloom_config_file == "":
lowerCamelCase__ : Tuple = BloomConfig()
else:
lowerCamelCase__ : int = BloomConfig.from_json_file(UpperCAmelCase )
if shard_model:
lowerCamelCase__ : List[Any] = os.listdir(UpperCAmelCase )
lowerCamelCase__ : Dict = sorted(filter(lambda UpperCAmelCase : s.startswith('''layer''' ) and "model_00" in s , UpperCAmelCase ) )
lowerCamelCase__ : Any = {'''weight_map''': {}, '''metadata''': {}}
lowerCamelCase__ : Optional[Any] = 0
lowerCamelCase__ : List[str] = None
lowerCamelCase__ : int = BloomConfig()
for j, file in enumerate(UpperCAmelCase ):
print('''Processing file: {}'''.format(UpperCAmelCase ) )
lowerCamelCase__ : Any = None
for i in range(UpperCAmelCase ):
# load all TP files
lowerCamelCase__ : Dict = file.replace('''model_00''' , f"model_0{i}" )
lowerCamelCase__ : Optional[Any] = torch.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) , map_location='''cpu''' )
# Rename keys in the transformers names
lowerCamelCase__ : List[Any] = list(temp.keys() )
for key in keys:
lowerCamelCase__ : Tuple = temp.pop(UpperCAmelCase )
if tensors is None:
lowerCamelCase__ : Union[str, Any] = temp
else:
for key in tensors.keys():
if any(key.endswith(UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowerCamelCase__ : Optional[Any] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
lowerCamelCase__ : Union[str, Any] = torch.cat([tensors[key], temp[key]] , dim=UpperCAmelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowerCamelCase__ : List[Any] = tensors[key] / pretraining_tp
torch.save(
UpperCAmelCase , os.path.join(
UpperCAmelCase , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(UpperCAmelCase ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
lowerCamelCase__ : Tuple = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
lowerCamelCase__ : Optional[Any] = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) , str(len(UpperCAmelCase ) ).zfill(5 ) )
lowerCamelCase__ : Tuple = BloomConfig()
lowerCamelCase__ : Dict = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
lowerCamelCase__ : List[str] = total_size
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(UpperCAmelCase , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
lowerCamelCase__ : Union[str, Any] = json.dumps(UpperCAmelCase , indent=2 , sort_keys=UpperCAmelCase ) + '''\n'''
f.write(UpperCAmelCase )
else:
lowerCamelCase__ : Union[str, Any] = BloomModel(UpperCAmelCase )
lowerCamelCase__ : Optional[int] = os.listdir(UpperCAmelCase )
lowerCamelCase__ : Tuple = sorted(filter(lambda UpperCAmelCase : s.startswith('''layer''' ) and "model_00" in s , UpperCAmelCase ) )
lowerCamelCase__ : int = None
        for j, file in enumerate(UpperCAmelCase ):
lowerCamelCase__ : int = None
for i in range(UpperCAmelCase ):
# load all TP files
lowerCamelCase__ : Dict = file.replace('''model_00''' , f"model_0{i}" )
lowerCamelCase__ : Optional[Any] = torch.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) , map_location='''cpu''' )
# Rename keys in the transformers names
lowerCamelCase__ : Optional[Any] = list(temp.keys() )
for key in keys:
lowerCamelCase__ : Tuple = temp.pop(UpperCAmelCase )
if tensors is None:
lowerCamelCase__ : Optional[int] = temp
else:
for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowerCamelCase__ : int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
lowerCamelCase__ : List[str] = torch.cat([tensors[key], temp[key]] , dim=UpperCAmelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowerCamelCase__ : Dict = tensors[key] / pretraining_tp
lowerCamelCase__ : Dict = model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
if missing_keys is None:
lowerCamelCase__ : Optional[Any] = set(other_keys.missing_keys )
else:
lowerCamelCase__ : Dict = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f"The keys {missing_keys} are missing"
# Save pytorch-model
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
lowerCamelCase__ : Tuple = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
lowerCamelCase__ : Optional[int] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}" )
if config.torch_dtype is not None:
lowerCamelCase__ : str = model.to(config.torch_dtype )
torch.save(model.state_dict() , UpperCAmelCase )
print(f"Save configuration file to {pytorch_config_dump_path}" )
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 142 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_convbert_fast'] = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convbert'] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convbert'] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
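# Hedged aside (minimal sketch of the lazy-import idea, independent of the
# transformers internals above): a module-level __getattr__ (PEP 562) can defer
# the real import until an attribute is first accessed.
import importlib

def _lazy_getattr(module_name ):
    def __getattr__(name ):
        return getattr(importlib.import_module(module_name ) , name )
    return __getattr__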
| 142 | 1 |
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = 'http://www.mocksite.com/file1.txt'
CONTENT = '"text": ["foo", "foo"]'
HASH = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'
class MockResponse :
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}
    def iter_content( self , **kwargs ):
        return [bytes(CONTENT , """utf-8""" )]
def mock_request( *args ,**kwargs ):
    return MockResponse()
@pytest.mark.parametrize("""urls_type""" ,[str, list, dict] )
def test_download( urls_type ,tmp_path ,monkeypatch ):
    import requests
    monkeypatch.setattr(requests ,"""request""" ,mock_request )
    url = URL
    if issubclass(urls_type ,str ):
        urls = url
    elif issubclass(urls_type ,list ):
        urls = [url]
    elif issubclass(urls_type ,dict ):
        urls = {"""train""": url}
    dataset_name = """dummy"""
    cache_subdir = """downloads"""
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root ,cache_subdir ) ,use_etag=True ,)
    dl_manager = DownloadManager(dataset_name=dataset_name ,download_config=download_config )
    downloaded_paths = dl_manager.download(urls )
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls ,str ):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls ,dict ):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths ,input_urls ):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path )
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(""".json""" )
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text() )
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" ,[str, list, dict] )
def test_extract( paths_type ,xz_file ,text_file ):
    filename = str(xz_file )
    if issubclass(paths_type ,str ):
        paths = filename
    elif issubclass(paths_type ,list ):
        paths = [filename]
    elif issubclass(paths_type ,dict ):
        paths = {"""train""": filename}
    dataset_name = """dummy"""
    cache_dir = xz_file.parent
    extracted_subdir = """extracted"""
    download_config = DownloadConfig(
        cache_dir=cache_dir ,use_etag=False ,)
    dl_manager = DownloadManager(dataset_name=dataset_name ,download_config=download_config )
    extracted_paths = dl_manager.extract(paths )
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths ,str ):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths ,dict ):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths ,input_paths ):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path )
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path ,etag=None )
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl( path ,file ):
    assert path.endswith(""".jsonl""" )
    for num_items, line in enumerate(file ,start=1 ):
        item = json.loads(line.decode("""utf-8""" ) )
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" ,["""tar_jsonl_path""", """zip_jsonl_path"""] )
def test_iter_archive_path( archive_jsonl ,request ):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl )
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path ) ,start=1 ):
        _test_jsonl(path ,file )
    assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" ,["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def test_iter_archive_file( archive_nested_jsonl ,request ):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl )
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path ) ,start=1 ):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file ) ,start=1 ):
            _test_jsonl(subpath ,subfile )
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files( data_dir_with_hidden_files ):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files ) ,start=1 ):
        assert os.path.basename(file ) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
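# Hedged usage sketch (added illustration of the API exercised above):
def _demo_download_and_read(url ):
    dl_manager = DownloadManager()
    local_path = dl_manager.download(url )  # returns a cached local file path
    with open(local_path , encoding="""utf-8""" ) as f:
        return f.read()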
| 126 | """simple docstring"""
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def convert_tax_checkpoint_to_flax( tax_checkpoint_path ,config_name ,flax_dump_folder_path ):
    config = AutoConfig.from_pretrained(config_name )
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config )
    tax_model = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    split_mlp_wi = """wi_0""" in tax_model["""target"""]["""encoder"""]["""layers_0"""]["""mlp"""]
if config.model_type == "t5":
_lowerCAmelCase : Tuple = """SelfAttention"""
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_lowerCAmelCase : Optional[Any] = """LocalSelfAttention"""
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : Union[str, Any] = """TransientGlobalSelfAttention"""
else:
        raise ValueError(
            """Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with `encoder_attention_type`"""
            """ attribute with a value from ['local', 'transient-global'].""" )
# Encoder
for layer_index in range(config.num_layers ):
_lowerCAmelCase : Tuple = f"layers_{str(_lowerCamelCase )}"
# Self-Attention
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""]
_lowerCAmelCase : str = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""]
_lowerCAmelCase : str = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""]
_lowerCAmelCase : Any = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : Dict = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""]
# Layer Normalization
_lowerCAmelCase : Any = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""]
if split_mlp_wi:
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
_lowerCAmelCase : int = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
_lowerCAmelCase : Tuple = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Tuple = tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
_lowerCAmelCase : Any = flax_model.params["""encoder"""]["""block"""][str(_lowerCamelCase )]["""layer"""]
_lowerCAmelCase : Any = tax_attention_key
_lowerCAmelCase : str = tax_attention_out
_lowerCAmelCase : Union[str, Any] = tax_attention_query
_lowerCAmelCase : Optional[Any] = tax_attention_value
_lowerCAmelCase : List[str] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : Any = tax_global_layer_norm
if split_mlp_wi:
_lowerCAmelCase : Dict = tax_mlp_wi_a
_lowerCAmelCase : List[Any] = tax_mlp_wi_a
else:
_lowerCAmelCase : List[str] = tax_mlp_wi
_lowerCAmelCase : str = tax_mlp_wo
_lowerCAmelCase : Optional[Any] = tax_mlp_layer_norm
_lowerCAmelCase : Any = flax_model_encoder_layer_block
# Only for layer 0:
_lowerCAmelCase : Union[str, Any] = tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T
_lowerCAmelCase : Optional[Any] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T
_lowerCAmelCase : Optional[int] = tax_encoder_global_rel_embedding
# Assigning
_lowerCAmelCase : Any = tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""]
_lowerCAmelCase : Any = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
_lowerCAmelCase : Optional[int] = f"layers_{str(_lowerCamelCase )}"
# Self-Attention
_lowerCAmelCase : List[str] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""]
_lowerCAmelCase : int = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""]
_lowerCAmelCase : List[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""]
_lowerCAmelCase : str = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Optional[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][
"""scale"""
]
# Encoder-Decoder-Attention
_lowerCAmelCase : List[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""]
_lowerCAmelCase : List[str] = tax_enc_dec_attention_module["""key"""]["""kernel"""]
_lowerCAmelCase : List[Any] = tax_enc_dec_attention_module["""out"""]["""kernel"""]
_lowerCAmelCase : List[str] = tax_enc_dec_attention_module["""query"""]["""kernel"""]
_lowerCAmelCase : Dict = tax_enc_dec_attention_module["""value"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""]
# MLP
if split_mlp_wi:
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
_lowerCAmelCase : List[str] = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
_lowerCAmelCase : str = flax_model.params["""decoder"""]["""block"""][str(_lowerCamelCase )]["""layer"""]
_lowerCAmelCase : int = tax_attention_key
_lowerCAmelCase : List[str] = tax_attention_out
_lowerCAmelCase : Optional[Any] = tax_attention_query
_lowerCAmelCase : Dict = tax_attention_value
_lowerCAmelCase : str = tax_pre_attention_layer_norm
_lowerCAmelCase : List[Any] = tax_enc_dec_attention_key
_lowerCAmelCase : List[Any] = tax_enc_dec_attention_out
_lowerCAmelCase : Tuple = tax_enc_dec_attention_query
_lowerCAmelCase : Any = tax_enc_dec_attention_value
_lowerCAmelCase : Dict = tax_cross_layer_norm
if split_mlp_wi:
_lowerCAmelCase : Dict = tax_mlp_wi_a
_lowerCAmelCase : int = tax_mlp_wi_a
else:
_lowerCAmelCase : Optional[int] = tax_mlp_wi
_lowerCAmelCase : Dict = tax_mlp_wo
        _lowerCAmelCase : List[Any] = tax_mlp_layer_norm
_lowerCAmelCase : Optional[Any] = flax_model_decoder_layer_block
# Decoder Normalization
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""]
    _lowerCAmelCase : List[str] = tax_decoder_norm
# Only for layer 0:
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T
_lowerCAmelCase : Union[str, Any] = tax_decoder_rel_embedding
# Token Embeddings
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""token_embedder"""]["""embedding"""]
    _lowerCAmelCase : Optional[int] = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_lowerCAmelCase : Tuple = tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""]
flax_model.save_pretrained(_lowerCamelCase )
print("""T5X Model was sucessfully converted!""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
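# Hedged aside (added illustration, not invoked by the script): after a
# conversion, the Flax parameter tree can be flattened to spot-check shapes.
def _demo_print_param_shapes(flax_model ):
    from flax.traverse_util import flatten_dict

    for path, leaf in flatten_dict(flax_model.params ).items():
        print("""/""".join(path ) , leaf.shape )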
| 126 | 1 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
_lowerCamelCase : Union[str, Any] = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''esm'''
    def __init__( self , vocab_size=None , mask_token_id=None , pad_token_id=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1026 , initializer_range=0.02 , layer_norm_eps=1e-12 , position_embedding_type="absolute" , use_cache=True , emb_layer_norm_before=None , token_dropout=False , is_folding_model=False , esmfold_config=None , vocab_list=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('''No esmfold_config supplied for folding model, using default values.''')
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config , dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''')
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , False):
            raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''')
    def to_dict( self ):
        '''simple docstring'''
        output = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig):
            output['''esmfold_config'''] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig :
    '''simple docstring'''
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None
    def __post_init__( self ):
        '''simple docstring'''
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict):
            self.trunk = TrunkConfig(**self.trunk)
    def to_dict( self ):
        '''simple docstring'''
        output = asdict(self)
        output['''trunk'''] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig :
    '''simple docstring'''
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None
    def __post_init__( self ):
        '''simple docstring'''
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module , dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                f""" {self.sequence_state_dim} and {self.sequence_head_width}.""")
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""")
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got'''
                f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""")
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got'''
                f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""")
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""")
        if self.dropout >= 0.4:
            raise ValueError(f"""`dropout` should be less than 0.4, got {self.dropout}.""")
    def to_dict( self ):
        '''simple docstring'''
        output = asdict(self)
        output['''structure_module'''] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig :
    '''simple docstring'''
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5
    def to_dict( self ):
        '''simple docstring'''
        return asdict(self)
def get_default_vocab_list( ) -> Tuple:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
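# Hedged usage sketch (added illustration, not part of the original file):
def _demo_esm_folding_config():
    config = EsmConfig(vocab_size=33 , is_folding_model=True , esmfold_config={"""trunk""": {"""num_blocks""": 4}} )
    # nested dicts are promoted to EsmFoldConfig / TrunkConfig instances
    return config.to_dict()["""esmfold_config"""]["""trunk"""]["""num_blocks"""]  # -> 4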
| 14 |
def create_ngram( sentence : str, ngram_size : int ):
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
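    # Hedged worked example (added illustration):
    assert create_ngram("hello", 2) == ["he", "el", "ll", "lo"]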
| 284 | 0 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x ): # picklable for multiprocessing
    return x.sum()
def add_one(i ): # picklable for multiprocessing
    return i + 1
@dataclass
class A :
    x: int
    y: str
class PyUtilsTest( TestCase ):
    def test_map_nested( self ):
'''simple docstring'''
lowercase__ = {}
lowercase__ = []
lowercase__ = 1
lowercase__ = [1, 2]
lowercase__ = {"a": 1, "b": 2}
lowercase__ = {"a": [1, 2], "b": [3, 4]}
lowercase__ = {"a": {"1": 1}, "b": 2}
lowercase__ = {"a": 1, "b": 2, "c": 3, "d": 4}
lowercase__ = {}
lowercase__ = []
lowercase__ = 2
lowercase__ = [2, 3]
lowercase__ = {"a": 2, "b": 3}
lowercase__ = {"a": [2, 3], "b": [4, 5]}
lowercase__ = {"a": {"1": 2}, "b": 3}
lowercase__ = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(_lowercase , _lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase ) , _lowercase )
lowercase__ = 2
self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase ) , _lowercase )
self.assertEqual(map_nested(_lowercase , _lowercase , num_proc=_lowercase ) , _lowercase )
lowercase__ = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
lowercase__ = {"a": 2, "b": 0, "c": 2}
lowercase__ = {
"a": np.eye(2 ).astype(_lowercase ),
"b": np.zeros(3 ).astype(_lowercase ),
"c": np.ones(2 ).astype(_lowercase ),
}
self.assertEqual(map_nested(_lowercase , _lowercase , map_numpy=_lowercase ) , _lowercase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_lowercase , _lowercase , map_numpy=_lowercase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(_lowercase , _lowercase , map_numpy=_lowercase , num_proc=_lowercase ) , _lowercase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_lowercase , _lowercase , map_numpy=_lowercase , num_proc=_lowercase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(_lowercase ): # can't pickle a local lambda
            map_nested(lambda x : x + 1 , _lowercase , num_proc=_lowercase )
    def test_zip_dict( self ):
'''simple docstring'''
lowercase__ = {"a": 1, "b": 2}
lowercase__ = {"a": 3, "b": 4}
lowercase__ = {"a": 5, "b": 6}
lowercase__ = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(_lowercase , _lowercase , _lowercase ) ) , _lowercase )
    def test_temporary_assignment( self ):
'''simple docstring'''
        class Foo :
            my_attr = 'bar'
        foo = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(_lowercase , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc( iterable_length , num_proc , expected_num_proc ):
    with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        data_struct = {f'''{i}''': i for i in range(iterable_length )}
        _ = map_nested(lambda x : x + 10 , data_struct , num_proc=num_proc , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest( TestCase ):
@require_tf
    def test_tensorflow( self ):
'''simple docstring'''
import tensorflow as tf
from tensorflow.keras import layers
lowercase__ = layers.Dense(2 )
def gen_random_output():
lowercase__ = tf.random.uniform((1, 3) )
return model(_lowercase ).numpy()
        with temp_seed(42 , set_tensorflow=True ):
lowercase__ = gen_random_output()
        with temp_seed(42 , set_tensorflow=True ):
lowercase__ = gen_random_output()
lowercase__ = gen_random_output()
np.testing.assert_equal(_lowercase , _lowercase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
    def test_torch( self ):
'''simple docstring'''
import torch
def gen_random_output():
lowercase__ = torch.nn.Linear(3 , 2 )
lowercase__ = torch.rand(1 , 3 )
return model(_lowercase ).detach().numpy()
        with temp_seed(42 , set_pytorch=True ):
lowercase__ = gen_random_output()
        with temp_seed(42 , set_pytorch=True ):
lowercase__ = gen_random_output()
lowercase__ = gen_random_output()
np.testing.assert_equal(_lowercase , _lowercase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
    def test_numpy( self ):
'''simple docstring'''
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
lowercase__ = gen_random_output()
with temp_seed(42 ):
lowercase__ = gen_random_output()
lowercase__ = gen_random_output()
np.testing.assert_equal(_lowercase , _lowercase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data" , [{}] )
def test_nested_data_structure_data( input_data ):
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" , [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] , )
def test_flatten( data , expected_output ):
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1 , y="foobar" )
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input ) == expected_output
    input = {"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=10 , y="foo" )] )
def _split_text( text ):
    return text.split()
def _2seconds_generator_of_2items_with_timing( content ):
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def _A ( ):
with Pool(2 ) as pool:
lowercase__ = list(iflatmap_unordered(__magic_name__ , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(__magic_name__ ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
lowercase__ = list(iflatmap_unordered(__magic_name__ , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(__magic_name__ ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
lowercase__ = []
for yield_time, content in iflatmap_unordered(
__magic_name__ , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
out.append(__magic_name__ )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
assert len(__magic_name__ ) == 4
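

# For context, a minimal sketch of the save/seed/restore pattern that the
# `temp_seed` tests above rely on. This is an illustration only: the real
# `temp_seed` lives in `datasets.utils.py_utils` and also seeds PyTorch and
# TensorFlow; this version handles NumPy alone and reuses the module's `np`
# (NumPy) import.
import contextlib


@contextlib.contextmanager
def _numpy_temp_seed_sketch(seed: int):
    state = np.random.get_state()  # save the global NumPy RNG state
    np.random.seed(seed)  # make randomness reproducible inside the block
    try:
        yield
    finally:
        np.random.set_state(state)  # restore it so the seeding stays local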
| 350 |
import math


def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1

if __name__ == "__main__":
print(F"""{solution() = }""")
| 201 | 0 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")


# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('''pretraining''', '''MODEL_FOR_PRETRAINING_MAPPING_NAMES''', '''AutoModelForPreTraining'''),
('''feature-extraction''', '''MODEL_MAPPING_NAMES''', '''AutoModel'''),
('''audio-classification''', '''MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioClassification'''),
('''text-generation''', '''MODEL_FOR_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForCausalLM'''),
('''automatic-speech-recognition''', '''MODEL_FOR_CTC_MAPPING_NAMES''', '''AutoModelForCTC'''),
('''image-classification''', '''MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForImageClassification'''),
('''image-segmentation''', '''MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES''', '''AutoModelForImageSegmentation'''),
('''fill-mask''', '''MODEL_FOR_MASKED_LM_MAPPING_NAMES''', '''AutoModelForMaskedLM'''),
('''object-detection''', '''MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForObjectDetection'''),
(
'''zero-shot-object-detection''',
'''MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES''',
'''AutoModelForZeroShotObjectDetection''',
),
('''question-answering''', '''MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForQuestionAnswering'''),
('''text2text-generation''', '''MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForSeq2SeqLM'''),
('''text-classification''', '''MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForSequenceClassification'''),
('''automatic-speech-recognition''', '''MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES''', '''AutoModelForSpeechSeq2Seq'''),
(
'''table-question-answering''',
'''MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForTableQuestionAnswering''',
),
('''token-classification''', '''MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForTokenClassification'''),
('''multiple-choice''', '''MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES''', '''AutoModelForMultipleChoice'''),
(
'''next-sentence-prediction''',
'''MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES''',
'''AutoModelForNextSentencePrediction''',
),
(
'''audio-frame-classification''',
'''MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForAudioFrameClassification''',
),
('''audio-xvector''', '''MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES''', '''AutoModelForAudioXVector'''),
(
'''document-question-answering''',
'''MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForDocumentQuestionAnswering''',
),
(
'''visual-question-answering''',
'''MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForVisualQuestionAnswering''',
),
    ('''image-to-text''', '''MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES''', '''AutoModelForVision2Seq'''),
(
'''zero-shot-image-classification''',
'''MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForZeroShotImageClassification''',
),
('''depth-estimation''', '''MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES''', '''AutoModelForDepthEstimation'''),
('''video-classification''', '''MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForVideoClassification'''),
('''mask-generation''', '''MODEL_FOR_MASK_GENERATION_MAPPING_NAMES''', '''AutoModelForMaskGeneration'''),
]
def camel_case_split(identifier):
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
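

# For example (illustration): camel_case_split("TFConvBertModel") returns
# ["TF", "Conv", "Bert", "Model"]; the table builder below peels words off a
# class name this way until it finds a known model-type prefix.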


def get_frameworks_table():
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure each model type gets a processing class.
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)


def update_pipeline_and_auto_class_table(table):
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table


def update_metadata(token, commit_sha):
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)

    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )


def check_pipeline_tags():
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
    parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
    parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()

    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
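
    # Typical invocations (for reference), run from the root of the repo:
    #   python utils/update_metadata.py --token <hf_token> --commit_sha <sha>
    #   python utils/update_metadata.py --check-only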
| 298 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)


@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 298 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    r"""
    Stochastic sampling from Karras et al. [1] tailored to the Variance-Expanding (VE) models [2].

    [1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models."
    [2] Song, Yang, et al. "Score-based generative modeling through stochastic differential equations."
    """

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
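

# Minimal usage sketch (illustration; assumes a UNet2DModel trained for this
# sigma schedule is available -- the checkpoint path below is hypothetical):
#   unet = UNet2DModel.from_pretrained("path/to/karras-ve-unet")
#   pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]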
| 360 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    r"""
    Constructs an MCTCT processor which wraps an MCTCT feature extractor and a tokenizer into a single processor.
    """

    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
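

# Usage sketch (illustration): the processor combines feature extraction and
# tokenization in one call, so `labels` come back alongside `input_features`.
# The checkpoint id below is an assumption, not part of this file:
#   processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
#   batch = processor(audio=waveform, sampling_rate=16000, text=transcript)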
| 114 | 0 |
from collections import deque

def tarjan(g):
    """Tarjan's algorithm: returns the strongly connected components of a
    directed graph given as an adjacency list."""
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g

if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
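    # A second, minimal check (added for illustration): with no edges, every
    # vertex is its own strongly connected component.
    assert [[0], [1]] == tarjan(create_graph(2, []))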
| 287 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 287 | 1 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None

if __name__ == "__main__":
print(F'''{solution() = }''')
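    # Quick sanity check (added for illustration): 10 = 2+2+2+2+2 = 2+2+3+3
    # = 2+3+5 = 3+7 = 5+5, so there are exactly five prime partitions of 10.
    assert len(partition(10)) == 5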
| 359 |
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
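

# Worked example (illustration, matching the cipher's classic textbook case):
# encode("Hide the gold in the tree stump", "playfair example") prepares the
# text as "HI DE TH EG OL DI NT HE TR EX ES TU MP" (note the inserted X for
# the doubled E) and should yield "BMODZBXDNABEKUDMUIXMMOUVIF".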
| 55 | 0 |
"""simple docstring"""
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: finds the longest palindromic substring in linear time.

    >>> palindromic_string("abbbaba")
    'abbba'
    >>> palindromic_string("ababa")
    'ababa'
    """
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
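

# Example (illustration): for "abbbaba" the longest palindromic substring is
# "abbba". Manacher's trick keeps the scan O(n) by reusing the palindrome
# length already computed at the mirror position `l + r - j`.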
if __name__ == "__main__":
import doctest
doctest.testmod() | 126 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)

    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions

if __name__ == "__main__":
import doctest
doctest.testmod()
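
    # Classic worked example (added for illustration): the value/weight ratios
    # are 6, 5 and 4, so items 0 and 1 are taken whole and 2/3 of item 2 fills
    # the remaining capacity, giving 60 + 100 + 80 = 240.
    assert fractional_knapsack([60, 100, 120], [10, 20, 30], 50) == (240.0, [1, 1, 2 / 3])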
| 295 |
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # get the image dimensions
    height, width = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(height):
        for j in range(width):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img

if __name__ == "__main__":
# read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
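
# Note (illustration): `[255, 255, 255] - img[i][j]` relies on NumPy
# broadcasting, so every channel value c becomes 255 - c. A vectorized
# equivalent for the whole image is simply `255 - img`.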
| 295 | 1 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 82 |
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf

        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break

            x += 1

        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
| 201 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_gpt_neox_fast'''] = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_gpt_neox'''] = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 118 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of successors: free, in-bounds neighbour cells."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Walks the parent pointers back to the start and reverses the result."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.bwd_bfs.target = current_fwd_node
            self.fwd_bfs.target = current_bwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()

    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
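
# Quick sanity check of the feed_forward_proj parsing above
# (assumes an installed transformers exposing T5Config).
def t5_config_example() -> None:
    from transformers import T5Config

    config = T5Config(feed_forward_proj="gated-gelu")
    print(config.is_gated_act)  # True
    print(config.dense_act_fn)  # "gelu_new", via the backwards-compatibility branch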
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
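
# Typical invocation of the conversion script above (script filename and
# output directory are hypothetical):
#
#   python convert_unclip_txt2img_to_image_variation.py \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha \
#       --dump_path ./unclip-image-variation
#
# The converted pipeline can then be reloaded with:
#
#   from diffusers import UnCLIPImageVariationPipeline
#   pipeline = UnCLIPImageVariationPipeline.from_pretrained("./unclip-image-variation")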
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
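
# Sketch of the processor round trip the tests above exercise (assumes an
# installed transformers and access to the Hub checkpoint used in the tests).
def owlvit_processor_example() -> None:
    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    image = Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))

    inputs = processor(text=[["cat", "nasa badge"]], images=image, return_tensors="np")
    print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']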
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                column_type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=column_type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
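
# What the builder above serves, seen from the user side (file paths are
# hypothetical; assumes an installed datasets library).
#
#   from datasets import load_dataset
#
#   # One JSON object per line ("JSON Lines"):
#   ds = load_dataset("json", data_files="data/train.jsonl", split="train")
#
#   # A single JSON document whose records live under one key, handled by
#   # the `field` branch of _generate_tables above:
#   ds = load_dataset("json", data_files="data/dump.json", field="data", split="train")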
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(
        self,
        waveform: np.ndarray,
    ) -> np.ndarray:
        """
        Get mel-filter bank features using TorchAudio. Note that TorchAudio requires 16-bit signed integers as inputs
        and hence the waveform should not be normalized before feature extraction.
        """
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
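
# Standalone NumPy illustration of the utterance-level CMVN performed by
# utterance_cmvn above: per-feature mean ~0 and std ~1 after normalization.
def cmvn_example() -> None:
    features = np.random.randn(120, 80).astype(np.float32) * 3.0 + 5.0  # (frames, mel bins)
    normalized = (features - features.mean(axis=0)) / features.std(axis=0)

    print(np.allclose(normalized.mean(axis=0), 0.0, atol=1e-4))  # True
    print(np.allclose(normalized.std(axis=0), 1.0, atol=1e-4))   # True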
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
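
# Usage sketch for the fast tokenizer above (assumes an installed
# transformers; the checkpoint is one of the entries in the map above).
def bloom_tokenizer_example() -> None:
    tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
    encoded = tokenizer("Hello world")
    print(encoded["input_ids"])
    print(tokenizer.decode(encoded["input_ids"]))  # "Hello world"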
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """
    Convert a Roman numeral to an integer.

    >>> roman_to_int("XIV")
    14
    >>> roman_to_int("MMMCMXCIX")
    3999
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """
    Convert an integer to a Roman numeral.

    >>> int_to_roman(14)
    'XIV'
    >>> int_to_roman(3999)
    'MMMCMXCIX'
    """
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
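
# Spot checks for the two conversions above, including a full round trip
# over the standard Roman numeral range.
def roman_examples() -> None:
    assert roman_to_int("XIV") == 14
    assert roman_to_int("MCMXCIV") == 1994
    assert int_to_roman(14) == "XIV"
    assert all(roman_to_int(int_to_roman(n)) == n for n in range(1, 4000))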
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    """Sorts the first n elements of collection in place, recursively."""
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Bubbles collection[index - 1] forward until it is in order."""
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
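
# Quick non-interactive check of the recursive sort above
# (input list chosen arbitrarily).
def insertion_sort_example() -> None:
    data = [5, 3, 8, 1, 9, 2]
    rec_insertion_sort(data, len(data))
    print(data)  # [1, 2, 3, 5, 8, 9]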
"""simple docstring"""
__snake_case = [0, 2, 4, 6, 8]
__snake_case = [1, 3, 5, 7, 9]
def __lowerCAmelCase ( lowercase : int , lowercase : int , lowercase : list[int] , lowercase : int ) -> int:
"""simple docstring"""
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
snake_case : Union[str, Any] = 0
for digit in range(10 ):
snake_case : List[Any] = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , lowercase , lowercase )
return result
snake_case : int = 0
for digita in range(10 ):
snake_case : Optional[int] = digita
if (remainder + digita) % 2 == 0:
snake_case : Dict = ODD_DIGITS
else:
snake_case : List[str] = EVEN_DIGITS
for digita in other_parity_digits:
snake_case : Union[str, Any] = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , lowercase , lowercase , )
return result
def __lowerCAmelCase ( lowercase : int = 9 ) -> int:
"""simple docstring"""
snake_case : Dict = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(lowercase , 0 , [0] * length , lowercase )
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
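
# Brute-force cross-check of reversible_numbers for small lengths: a number n
# is "reversible" when n + reverse(n) has only odd digits and no leading zeros.
def brute_force_check() -> None:
    def is_reversible(n: int) -> bool:
        if n % 10 == 0:  # its reverse would have a leading zero
            return False
        total = n + int(str(n)[::-1])
        return all(int(d) % 2 == 1 for d in str(total))

    brute = sum(is_reversible(n) for n in range(1, 10**3))
    fast = sum(reversible_numbers(length, 0, [0] * length, length) for length in (1, 2, 3))
    assert brute == fast == 120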
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = ["ConvNextFeatureExtractor"]
A : str = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
A : Any = _LazyModule(__name__, globals()["__file__"], _import_structure)
import unittest
from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class A ( A_ ):
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase="None" , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_input_mask
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= vocab_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= intermediate_size
__lowercase= hidden_act
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= num_labels
__lowercase= num_choices
__lowercase= relative_attention
__lowercase= position_biased_input
__lowercase= pos_att_type
__lowercase= scope
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= None
if self.use_input_mask:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _A (self ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _A (self , lowerCAmelCase ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= DebertaVaModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase )[0]
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase )[0]
__lowercase= model(lowerCAmelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= DebertaVaForMaskedLM(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= DebertaVaForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= DebertaVaForTokenClassification(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= DebertaVaForQuestionAnswering(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= DebertaVaForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _A (self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : List[Any] =(
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Union[str, Any] =(
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : Union[str, Any] =True
UpperCamelCase_ : int =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Dict =False
UpperCamelCase_ : List[str] =False
def _A (self ):
__lowercase= DebertaVaModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , hidden_size=3_7 )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase )
@slow
def _A (self ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= DebertaVaModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def _A (self ):
pass
@slow
def _A (self ):
__lowercase= DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
__lowercase= torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
__lowercase= torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase )[0]
# compare the actual values for a slice.
__lowercase= torch.tensor(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase , atol=1E-4 ) , f'{output[:, 1:4, 1:4]}' )
| 355 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=2 , lowerCAmelCase=9_9 , lowerCAmelCase=0 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase="last" , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_input_lengths
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= gelu_activation
__lowercase= sinusoidal_embeddings
__lowercase= causal
__lowercase= asm
__lowercase= n_langs
__lowercase= vocab_size
__lowercase= n_special
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= num_labels
__lowercase= num_choices
__lowercase= summary_type
__lowercase= use_proj
__lowercase= scope
__lowercase= bos_token_id
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= random_attention_mask([self.batch_size, self.seq_length] )
__lowercase= None
if self.use_input_lengths:
__lowercase= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , 2 ).float()
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _A (self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , lengths=lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMWithLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnsweringSimple(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
__lowercase= outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnswering(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , p_mask=lowerCAmelCase , )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , )
((__lowercase), )= result_with_labels.to_tuple()
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
((__lowercase), )= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_labels
__lowercase= XLMForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_choices
__lowercase= XLMForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _A (self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : int =(
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Dict =(
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCamelCase_ : str =(
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('Fast')
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
def _A (self ):
__lowercase= XLMModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , emb_dim=3_7 )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase ) )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= min_length + idx + 1
__lowercase= (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase ) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase ) , )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase ) , )
@slow
def _A (self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= XLMModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@require_torch
class A ( unittest.TestCase ):
@slow
def _A (self ):
__lowercase= XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(lowerCAmelCase )
__lowercase= torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=lowerCAmelCase ) # the president
__lowercase= [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase )
| 304 | 0 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''-m''', '''--pretrained_model_name_or_path''', type=str, default=None, required=True,
        help='''Path to pretrained model or model identifier from huggingface.co/models.''',
    )
    parser.add_argument(
        '''-c''', '''--caption''', type=str, default='''robotic cat with wings''',
        help='''Text used to generate images.''',
    )
    parser.add_argument(
        '''-n''', '''--images_num''', type=int, default=4,
        help='''How much images to generate.''',
    )
    parser.add_argument(
        '''-s''', '''--seed''', type=int, default=42,
        help='''Seed for random process.''',
    )
    parser.add_argument(
        '''-ci''', '''--cuda_id''', type=int, default=0,
        help='''cuda_id.''',
    )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError('''The specified number of rows and columns are not correct.''')
    w, h = imgs[0].size
    grid = Image.new('''RGB''', size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
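# Quick sanity check for image_grid (illustrative, not part of the original
# script): paste four solid-colour tiles into a 2x2 grid and verify the size.
def _demo_image_grid():
    tiles = [Image.new("RGB", (64, 64), color) for color in ("red", "green", "blue", "white")]
    grid = image_grid(tiles, rows=2, cols=2)
    assert grid.size == (128, 128)  # 2 cols x 64 px wide, 2 rows x 64 px tall
    return grid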
def generate_images(
    pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Return images unchecked so the benchmark always receives outputs
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    # Load the quantized UNet produced by Intel Neural Compressor, if present
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 101 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n    predictions: list of predictions to score. Each prediction\n        is a string that contains natural language and LaTex.\n    references: list of reference for each prediction. Each\n        reference is a string that contains natural language\n        and LaTex.\nReturns:\n    accuracy: accuracy after canonicalizing inputs\n        (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n    >>> metric = datasets.load_metric("competition_math")\n    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string'''),
                    '''references''': datasets.Value('''string'''),
                }
            ),
            homepage='''https://github.com/hendrycks/math''',
            codebase_urls=['''https://github.com/hendrycks/math'''],
        )

    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 9 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    """Configuration class for a GPT-NeoX-Japanese model."""

    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=2_560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10_000,
        max_position_embeddings=2_048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31_996,
        eos_token_id=31_999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 94 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_vision_text_dual_encoder': ['VisionTextDualEncoderConfig'],
'processing_vision_text_dual_encoder': ['VisionTextDualEncoderProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vision_text_dual_encoder'] = ['VisionTextDualEncoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vision_text_dual_encoder'] = ['FlaxVisionTextDualEncoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vision_text_dual_encoder'] = ['TFVisionTextDualEncoderModel']
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
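# With the lazy module installed in sys.modules, an import such as
# `from transformers.models.vision_text_dual_encoder import VisionTextDualEncoderModel`
# only loads the heavy torch-backed submodule on first attribute access, while
# static type checkers follow the eager imports in the TYPE_CHECKING branch above.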
| 94 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class SCREAMING_SNAKE_CASE__ (unittest.TestCase ):
def snake_case_ ( self):
lowercase__ : Any = 'ZinengTang/tvlt-base'
lowercase__ : Optional[Any] = tempfile.mkdtemp()
def snake_case_ ( self , **a):
return TvltImageProcessor.from_pretrained(self.checkpoint , **a)
def snake_case_ ( self , **a):
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **a)
def snake_case_ ( self):
shutil.rmtree(self.tmpdirname)
def snake_case_ ( self):
lowercase__ : int = self.get_image_processor()
lowercase__ : Dict = self.get_feature_extractor()
lowercase__ : List[Any] = TvltProcessor(image_processor=a , feature_extractor=a)
processor.save_pretrained(self.tmpdirname)
lowercase__ : Optional[int] = TvltProcessor.from_pretrained(self.tmpdirname)
self.assertIsInstance(processor.feature_extractor , a)
self.assertIsInstance(processor.image_processor , a)
def snake_case_ ( self):
lowercase__ : Any = self.get_image_processor()
lowercase__ : str = self.get_feature_extractor()
lowercase__ : Optional[int] = TvltProcessor(image_processor=a , feature_extractor=a)
lowercase__ : List[Any] = np.ones([1_2000])
lowercase__ : str = feature_extractor(a , return_tensors='np')
lowercase__ : Union[str, Any] = processor(audio=a , return_tensors='np')
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2)
def snake_case_ ( self):
lowercase__ : Dict = self.get_image_processor()
lowercase__ : List[Any] = self.get_feature_extractor()
lowercase__ : List[Any] = TvltProcessor(image_processor=a , feature_extractor=a)
lowercase__ : Tuple = np.ones([3, 224, 224])
lowercase__ : Optional[int] = image_processor(a , return_tensors='np')
lowercase__ : Union[str, Any] = processor(images=a , return_tensors='np')
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2)
def snake_case_ ( self):
lowercase__ : Any = self.get_image_processor()
lowercase__ : List[str] = self.get_feature_extractor()
lowercase__ : str = TvltProcessor(image_processor=a , feature_extractor=a)
lowercase__ : List[Any] = np.ones([1_2000])
lowercase__ : Optional[int] = np.ones([3, 224, 224])
lowercase__ : Optional[Any] = processor(audio=a , images=a)
self.assertListEqual(list(inputs.keys()) , ['audio_values', 'audio_mask', 'pixel_values', 'pixel_mask'])
# test if it raises when no input is passed
with pytest.raises(a):
processor()
def snake_case_ ( self):
lowercase__ : List[Any] = self.get_image_processor()
lowercase__ : Tuple = self.get_feature_extractor()
lowercase__ : List[str] = TvltProcessor(image_processor=a , feature_extractor=a)
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='`processor` and `image_processor`+`feature_extractor` model input names do not match' , )
| 214 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    """Iterative depth-first search; returns the set of vertices reachable from start."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
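# For this graph, depth_first_search(G, "A") returns the full vertex set
# {"A", "B", "C", "D", "E", "F", "G"}; tracing the reversed-adjacency stack by
# hand, the first-visit order works out to A, B, D, E, F, C, G (a sketch only,
# since the returned set itself is unordered).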
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
| 214 | 1 |
"""simple docstring"""
import os
def solution():
    """Maximum top-to-bottom path sum for the triangle in triangle.txt (Project Euler 18/67 style)."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, 'triangle.txt')
    with open(triangle_path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(' '):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    # Dynamic programming: each cell accumulates the best path sum from the row above.
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number_1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number_2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_1, number_2)
    return max(a[-1])
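# A minimal self-check (not part of the original solution): the same DP run on
# the classic 4-row example triangle, whose best path 3 -> 7 -> 4 -> 9 sums to 23.
def _demo_triangle():
    a = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number_1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number_2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_1, number_2)
    assert max(a[-1]) == 23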
if __name__ == "__main__":
    print(solution())
| 12 |
"""simple docstring"""
import re
def indian_phone_validator(phone: str) -> bool:
    """Return True if the given string is a valid Indian mobile number."""
    pat = re.compile(r'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$')
    if match := re.search(pat, phone):
        return match.string == phone
    return False
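# Behaviour sketch (inputs chosen for illustration): the pattern allows an
# optional "+91" prefix with an optional dash or space, an optional leading "0"
# or "91", then ten digits whose first digit is 7, 8 or 9.
#
#   indian_phone_validator("+918827897895")   # True
#   indian_phone_validator("9876543210")      # True
#   indian_phone_validator("+91 9876543210")  # True
#   indian_phone_validator("+914876543210")   # False (ten-digit part must start 7-9)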
if __name__ == "__main__":
    print(indian_phone_validator("""+918827897895"""))
| 12 | 1 |
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
END_COMMON = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def rename_state_dict_key(k: str, patterns: list):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
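# Worked example with an illustrative TF key (not read from a real checkpoint):
# "pegasus/decoder/layer_0/attention/self/query/kernel" passed through
# DECODER_PATTERNS first becomes "model.decoder.layers.0.attention.self.query.weight"
# via the INIT_COMMON rules, then "model.decoder.layers.0.self_attn.q_proj.weight"
# via the attention.self and query substitutions.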
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}
    for k, v in tqdm(decoder_weights.items(), """tf -> hf conversion"""):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(F"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, F"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
    for k, v in tqdm(remaining_weights.items(), """tf -> hf conversion"""):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(F"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, F"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
    mapping["""model.encoder.embed_positions.weight"""] = mapping["""model.embed_positions.weight"""]
    mapping["""model.decoder.embed_positions.weight"""] = mapping.pop("""model.embed_positions.weight""")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            """final_logits_bias""",
            """model.encoder.embed_tokens.weight""",
            """model.decoder.embed_tokens.weight""",
            """lm_head.weight""",
        ]
    ]
    assert unexpected_missing == [], F"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], F"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["""global_step"""]
    for name, shape in tqdm(init_vars, desc="""converting tf checkpoint to dict"""):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
    parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 112 |
'''simple docstring'''
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """Net present value of a cash-flow series; flow i is discounted by (1 + rate) ** i."""
    if discount_rate < 0:
        raise ValueError("""Discount rate cannot be negative""")
    if not cash_flows:
        raise ValueError("""Cash flows list cannot be empty""")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
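# Worked example (figures chosen for illustration): an upfront outlay of 1000
# followed by three 500 inflows at a 10% discount rate.
#
#   present_value(0.10, [-1000.0, 500.0, 500.0, 500.0])
#   = -1000 + 500/1.1 + 500/1.1**2 + 500/1.1**3
#   ~= 243.43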
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 112 | 1 |
'''simple docstring'''
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Trial division: return the set of distinct prime factors of n."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of distinct prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """True when every element of the iterable is equal (or it is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first n consecutive integers that each have n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    results = run(n)
    return results[0] if len(results) else None
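# Sanity notes: 644 = 2^2 * 7 * 23, 645 = 3 * 5 * 43 and 646 = 2 * 17 * 19 each
# have three distinct prime factors, so solution(3) == 644; the default
# solution() searches for four consecutive such integers (Project Euler 47).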
if __name__ == "__main__":
print(solution())
| 160 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
__lowercase = CanineTokenizer
__lowercase = False
def lowerCamelCase ( self ):
"""simple docstring"""
super().setUp()
_snake_case = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase ( self ):
"""simple docstring"""
return CanineTokenizer.from_pretrained('google/canine-s' )
def lowerCamelCase ( self , **lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
_snake_case = 10_24
return tokenizer
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.canine_tokenizer
_snake_case = ['Life is like a box of chocolates.', 'You never know what you\'re gonna get.']
# fmt: off
_snake_case = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
_snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='pt' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = list(batch.input_ids.numpy()[0] )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.canine_tokenizer
_snake_case = ['Once there was a man.', 'He wrote a test in HuggingFace Tranformers.']
_snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='pt' )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn('input_ids' , lowerCAmelCase_ )
self.assertIn('attention_mask' , lowerCAmelCase_ )
self.assertIn('token_type_ids' , lowerCAmelCase_ )
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.canine_tokenizer
_snake_case = [
'What\'s the weater?',
'It\'s about 25 degrees.',
]
_snake_case = tokenizer(
text_target=lowerCAmelCase_ , max_length=32 , padding='max_length' , truncation=lowerCAmelCase_ , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
tokenizer.save_pretrained(lowerCAmelCase_ )
_snake_case = tokenizer.__class__.from_pretrained(lowerCAmelCase_ )
_snake_case = after_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
shutil.rmtree(lowerCAmelCase_ )
_snake_case = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
_snake_case = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
_snake_case = chr(0XE_0_0_7 )
additional_special_tokens.append(lowerCAmelCase_ )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
tokenizer.save_pretrained(lowerCAmelCase_ )
_snake_case = tokenizer.__class__.from_pretrained(lowerCAmelCase_ )
_snake_case = after_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertIn(lowerCAmelCase_ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_snake_case = tokenizer.__class__.from_pretrained(lowerCAmelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.get_tokenizers(do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
_snake_case , _snake_case = self.get_clean_sequence(lowerCAmelCase_ )
# a special token for Canine can be defined as follows:
_snake_case = 0XE_0_0_5
_snake_case = chr(lowerCAmelCase_ )
tokenizer.add_special_tokens({'cls_token': special_token} )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertEqual(len(lowerCAmelCase_ ) , 1 )
_snake_case = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowerCAmelCase_ )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , input_encoded + special_token_id )
_snake_case = tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
self.assertTrue(special_token not in decoded )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.get_tokenizers(do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
_snake_case = chr(0XE_0_0_5 )
_snake_case = chr(0XE_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowerCAmelCase_ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'additional_special_tokens': [SPECIAL_TOKEN_2]} )
_snake_case = tokenizer.tokenize(lowerCAmelCase_ )
_snake_case = tokenizer.tokenize(lowerCAmelCase_ )
self.assertEqual(len(lowerCAmelCase_ ) , 1 )
self.assertEqual(len(lowerCAmelCase_ ) , 1 )
self.assertEqual(token_a[0] , lowerCAmelCase_ )
self.assertEqual(token_a[0] , lowerCAmelCase_ )
@require_tokenizers
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.get_tokenizers(do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
_snake_case = 0XE_0_0_6
_snake_case = chr(lowerCAmelCase_ )
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ )
tokenizer.add_special_tokens({'additional_special_tokens': [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(lowerCAmelCase_ )
tokenizer.from_pretrained(lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowerCAmelCase_ )
# a special token for Canine can be defined as follows:
_snake_case = 0XE_0_0_6
_snake_case = chr(lowerCAmelCase_ )
_snake_case = [new_token_a]
_snake_case = [new_token_a]
with open(os.path.join(lowerCAmelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_snake_case = tokenizer_class.from_pretrained(lowerCAmelCase_ , extra_ids=0 )
self.assertIn(lowerCAmelCase_ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
_snake_case = 0XE_0_0_7
_snake_case = chr(lowerCAmelCase_ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_snake_case = [AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ )]
_snake_case = tokenizer_class.from_pretrained(
lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , extra_ids=0 )
self.assertIn(lowerCAmelCase_ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.get_tokenizers(do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
_snake_case = 'hello world'
if self.space_between_special_tokens:
_snake_case = '[CLS] hello world [SEP]'
else:
_snake_case = input
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer.decode(lowerCAmelCase_ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(lowerCAmelCase_ , [output, output.lower()] )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
_snake_case = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
_snake_case = 'a'
_snake_case = ord(lowerCAmelCase_ )
for attr in attributes_list:
setattr(lowerCAmelCase_ , attr + '_id' , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , attr + '_id' ) , lowerCAmelCase_ )
setattr(lowerCAmelCase_ , attr + '_id' , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , attr + '_id' ) , lowerCAmelCase_ )
setattr(lowerCAmelCase_ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(lowerCAmelCase_ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(lowerCAmelCase_ , 'additional_special_tokens_ids' ) , [] )
_snake_case = 0XE_0_0_6
_snake_case = chr(lowerCAmelCase_ )
setattr(lowerCAmelCase_ , 'additional_special_tokens_ids' , [additional_special_token_id] )
self.assertListEqual(getattr(lowerCAmelCase_ , 'additional_special_tokens' ) , [additional_special_token] )
self.assertListEqual(getattr(lowerCAmelCase_ , 'additional_special_tokens_ids' ) , [additional_special_token_id] )
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
pass
| 160 | 1 |
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}


class NezhaConfig(PretrainedConfig):
    """Configuration class for a Nezha model."""

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21_128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
| 129 |
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad or truncate each sequence to sequence_length; a tuple padding value marks 2-d (span) entries."""
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            # left padding (slicing reconstructed to align each row's tail)
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :] = tensor[:sequence_length]
    return out_tensor.tolist()
def __UpperCAmelCase ( A : List[Any] ) -> str:
UpperCAmelCase_ : Dict = ord(A )
if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6):
return True
UpperCAmelCase_ : Union[str, Any] = unicodedata.category(A )
if cat.startswith('''P''' ):
return True
return False
@dataclass
class snake_case__ ( DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"
    def torch_call( self , features ):
        import torch
        label_name = '''label''' if '''label''' in features[0].keys() else '''labels'''
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch['''entity_ids'''] ).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label ) + [self.label_pad_token_id] * (sequence_length - len(label )) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label )) + list(label ) for label in labels
            ]
        ner_tags = [feature['''ner_tags'''] for feature in features]
        batch['''ner_tags'''] = padding_tensor(ner_tags , -1 , padding_side , sequence_length )
        original_entity_spans = [feature['''original_entity_spans'''] for feature in features]
        batch['''original_entity_spans'''] = padding_tensor(original_entity_spans , (-1, -1) , padding_side , sequence_length )
        batch = {k: torch.tensor(v , dtype=torch.int64 ) for k, v in batch.items()}
        return batch
| 304 | 0 |
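# Quick behaviour check for `padding_tensor` above: with a scalar padding
# value and the "right" padding side, each span is truncated to
# `sequence_length` and the tail is filled with the padding value (the
# inputs here are illustrative).
assert padding_tensor([[1, 2], [3]], -1, "right", 4) == [[1, 2, -1, -1], [3, -1, -1, -1]]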
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_ernie'''] = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 369 |
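# The init above registers a `_LazyModule` so heavy submodules are only
# imported when first accessed. A minimal, library-agnostic sketch of the
# same idea using PEP 562 module-level __getattr__ -- an illustration of the
# pattern, not transformers' actual implementation:
import importlib

_LAZY_IMPORTS = {"dumps": "json", "loads": "json"}  # exported name -> providing module

def __getattr__(name):
    if name in _LAZY_IMPORTS:
        module = importlib.import_module(_LAZY_IMPORTS[name])  # deferred until first use
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")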
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class a ( Pipeline ):
"""simple docstring"""
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , top_k=None ):
        """simple docstring"""
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params
    def __call__( self , images: Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
        """simple docstring"""
        return super().__call__(images , **kwargs )
    def preprocess( self , image ):
        """simple docstring"""
        image = load_image(image )
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        """simple docstring"""
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        """simple docstring"""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits , axis=-1 )[0]
            topk = tf.math.top_k(probs , k=top_k )
            scores , ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""" )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 69 | 0 |
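# Typical use of the image-classification pipeline above through the public
# factory; the checkpoint name is illustrative and the call assumes network
# access to download the model and the example image.
from transformers import pipeline

classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
predictions = classifier("http://images.cocodataset.org/val2017/000000039769.jpg", top_k=3)
for prediction in predictions:
    print(f"{prediction['label']}: {prediction['score']:.3f}")  # best labels first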
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char( cp : int ):
"""simple docstring"""
if (
(cp >= 0x4_E00 and cp <= 0x9_FFF)
or (cp >= 0x3_400 and cp <= 0x4_DBF) #
or (cp >= 0x20_000 and cp <= 0x2A_6DF) #
or (cp >= 0x2A_700 and cp <= 0x2B_73F) #
or (cp >= 0x2B_740 and cp <= 0x2B_81F) #
or (cp >= 0x2B_820 and cp <= 0x2C_EAF) #
or (cp >= 0xF_900 and cp <= 0xF_AFF)
or (cp >= 0x2F_800 and cp <= 0x2F_A1F) #
): #
return True
return False
def is_chinese( word : str ):
    """simple docstring"""
    for char in word:
        char = ord(char )
        if not _is_chinese_char(char ):
            return 0
    return 1
def get_chinese_word( tokens : List[str] ):
    """simple docstring"""
    word_set = set()
    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
def add_sub_symbol( bert_tokens : List[str] , chinese_word_set : set ):
    """simple docstring"""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )
    bert_word = bert_tokens
    start , end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            max_match_len = min(end - start , max_word_len )
            for i in range(max_match_len , 1 , -1 ):
                whole_word = "".join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref( lines : List[str] , ltp_tokenizer : LTP , bert_tokenizer : BertTokenizer ):
    """simple docstring"""
    ltp_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=['''cws'''] ).cws
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    bert_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = bert_tokenizer(lines[i : i + 100] , add_special_tokens=True , truncation=True , max_length=512 )
        bert_res.extend(res['''input_ids'''] )
    assert len(bert_res ) == len(lines )
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens , chinese_word )
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                token = token[2:]
                # save chinese tokens' pos
                if len(token ) == 1 and _is_chinese_char(ord(token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
    return ref_ids
def main( args ):
    """simple docstring"""
    with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp )  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(data , ltp_tokenizer , bert_tokenizer )
    with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
        data = [json.dumps(ref ) + '''\n''' for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
    args = parser.parse_args()
main(args)
| 94 |
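# Behaviour check for `add_sub_symbol` above, independent of LTP and BERT:
# with the segmentation {"你好", "世界"}, the continuation character of each
# matched word receives the "##" prefix used in whole-word-masking references.
assert add_sub_symbol(list("你好世界"), {"你好", "世界"}) == ["你", "##好", "世", "##界"]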
from __future__ import annotations
def depth_first_search( graph : dict , start : str ):
    """simple docstring"""
    explored , stack = set(start ), [start]
    while stack:
        v = stack.pop()
        explored.add(v )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(adj )
    return explored
return explored
G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
| 94 | 1 |
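# A recursive sketch of the same traversal for comparison; it visits
# neighbours in their natural order, so the explored set matches the
# iterative version above (assumes the graph fits in Python's recursion limit).
def depth_first_search_recursive(graph, node, explored=None):
    explored = set() if explored is None else explored
    explored.add(node)
    for adj in graph[node]:
        if adj not in explored:
            depth_first_search_recursive(graph, adj, explored)
    return explored

assert depth_first_search_recursive(G, "A") == depth_first_search(G, "A")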
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class lowerCamelCase ( PretrainedConfig ):
    model_type = '''roberta'''
    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class lowerCamelCase ( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 367 |
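# Usage sketch for the config above via the public transformers API; the
# printed values mirror the defaults in the signature.
from transformers import RobertaConfig

config = RobertaConfig()                # vocab_size=50265, hidden_size=768, ...
print(config.num_hidden_layers)         # 12
print(config.position_embedding_type)   # "absolute"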
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase_ = False
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCamelCase ( self ) -> List[Any]:
return 12
@property
def _lowerCamelCase ( self ) -> Dict:
return 12
@property
def _lowerCamelCase ( self ) -> List[Any]:
return 32
@property
def _lowerCamelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
snake_case = VQModel(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, )
return model
@property
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self ) -> Tuple:
torch.manual_seed(0 )
snake_case = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
return CLIPTextModel(lowercase_ )
@property
def _lowerCamelCase ( self ) -> str:
torch.manual_seed(0 )
snake_case = 12
snake_case = 12
snake_case = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
snake_case = TransformeraDModel(**lowercase_ )
return model
def _lowerCamelCase ( self ) -> Tuple:
snake_case = 'cpu'
snake_case = self.dummy_vqvae
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_transformer
snake_case = VQDiffusionScheduler(self.num_embed )
snake_case = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase_ )
snake_case = VQDiffusionPipeline(
vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = 'teddy bear playing in the pool'
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
snake_case = output.images
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe(
[prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = 'cpu'
snake_case = self.dummy_vqvae
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_transformer
snake_case = VQDiffusionScheduler(self.num_embed )
snake_case = LearnedClassifierFreeSamplingEmbeddings(
learnable=lowercase_, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length )
snake_case = VQDiffusionPipeline(
vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = 'teddy bear playing in the pool'
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
snake_case = output.images
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe(
[prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ) -> str:
snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
snake_case = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
snake_case = pipeline.to(lowercase_ )
pipeline.set_progress_bar_config(disable=lowercase_ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipeline(
'teddy bear playing in the pool', num_images_per_prompt=1, generator=lowercase_, output_type='np', )
snake_case = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 332 | 0 |
import os
def solution( ):
    '''simple docstring'''
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    triangle = os.path.join(script_dir , """triangle.txt""" )
    with open(triangle ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(""" """ ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number1 = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1 , number2 )
    return max(a[-1] )
if __name__ == "__main__":
print(solution())
| 12 |
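# The in-place DP in `solution` above makes row i hold the best path sum
# ending at each of its cells. The same recurrence on a small hard-coded
# triangle (the four-row example from Project Euler problem 18; best path
# is 3 + 7 + 4 + 9 = 23):
def max_path_sum(triangle):
    best = [row[:] for row in triangle]
    for i in range(1, len(best)):
        for j in range(len(best[i])):
            number1 = best[i - 1][j] if j != len(best[i - 1]) else 0
            number2 = best[i - 1][j - 1] if j > 0 else 0
            best[i][j] += max(number1, number2)
    return max(best[-1])

assert max_path_sum([[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]) == 23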
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation( cells : list[list[int]] ):
    '''simple docstring'''
    next_generation = []
    for i in range(len(cells ) ):
        next_generation_row = []
        for j in range(len(cells[i] ) ):
            # Get the number of live neighbours
            neighbour_count = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(A__ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(A__ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(A__ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
        next_generation.append(next_generation_row )
return next_generation
def generate_images( cells : list[list[int]] , frames : int ):
    '''simple docstring'''
    images = []
    for _ in range(frames ):
        # Create output image
        img = Image.new("""RGB""" , (len(cells[0] ), len(cells )) )
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells ) ):
            for y in range(len(cells[0] ) ):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img )
        cells = new_generation(cells )
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
| 12 | 1 |
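# A more compact way to write the neighbour count in `new_generation` above,
# using offset pairs; checked against the explicit version on the blinker,
# which oscillates with period 2.
def new_generation_compact(cells):
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]
    rows, cols = len(cells), len(cells[0])
    result = []
    for i in range(rows):
        row = []
        for j in range(cols):
            count = sum(
                cells[i + di][j + dj]
                for di, dj in offsets
                if 0 <= i + di < rows and 0 <= j + dj < cols
            )
            alive = cells[i][j] == 1
            row.append(1 if (alive and 2 <= count <= 3) or (not alive and count == 3) else 0)
        result.append(row)
    return result

assert new_generation_compact(BLINKER) == new_generation(BLINKER)
assert new_generation(new_generation(BLINKER)) == BLINKER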
"""simple docstring"""
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = '''src/transformers'''
# Matches is_xxx_available()
_re_backend = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_re_try = re.compile(r'''^\s*try:''')
# Catches a line with else:
_re_else = re.compile(r'''^\s*else:''')
def find_backend( _a ):
if _re_test_backend.search(_a ) is None:
return None
snake_case_ : Optional[int] = [b[0] for b in _re_backend.findall(_a )]
backends.sort()
return "_and_".join(_a )
def parse_init( _a ):
with open(_a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
snake_case_ : Any = f.readlines()
snake_case_ : Optional[Any] = 0
while line_index < len(_a ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_a ):
return None
# First grab the objects without a specific backend in _import_structure
snake_case_ : Optional[int] = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
snake_case_ : int = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_a ):
snake_case_ : Dict = _re_one_line_import_struct.search(_a ).groups()[0]
snake_case_ : Dict = re.findall(r'''\[([^\]]+)\]''' , _a )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
snake_case_ : List[Any] = _re_import_struct_key_value.search(_a )
if single_line_import_search is not None:
snake_case_ : Tuple = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_a ) > 0]
objects.extend(_a )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
snake_case_ : Tuple = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
snake_case_ : str = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
snake_case_ : Any = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
snake_case_ : Optional[int] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
snake_case_ : Any = lines[line_index]
if _re_import_struct_add_one.search(_a ) is not None:
objects.append(_re_import_struct_add_one.search(_a ).groups()[0] )
elif _re_import_struct_add_many.search(_a ) is not None:
snake_case_ : int = _re_import_struct_add_many.search(_a ).groups()[0].split(''', ''' )
snake_case_ : str = [obj[1:-1] for obj in imports if len(_a ) > 0]
objects.extend(_a )
elif _re_between_brackets.search(_a ) is not None:
snake_case_ : Dict = _re_between_brackets.search(_a ).groups()[0].split(''', ''' )
snake_case_ : Optional[int] = [obj[1:-1] for obj in imports if len(_a ) > 0]
objects.extend(_a )
elif _re_quote_object.search(_a ) is not None:
objects.append(_re_quote_object.search(_a ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
snake_case_ : Optional[int] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
snake_case_ : List[str] = []
while (
line_index < len(_a )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
snake_case_ : List[Any] = lines[line_index]
snake_case_ : str = _re_import.search(_a )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
snake_case_ : Any = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(_a ):
# If the line is an if is_backend_available, we grab all objects associated.
snake_case_ : int = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
snake_case_ : Union[str, Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
snake_case_ : Optional[int] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
snake_case_ : int = lines[line_index]
snake_case_ : int = _re_import.search(_a )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
snake_case_ : Tuple = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def analyze_results( import_dict_objects , type_hint_objects ):
    def find_duplicates(_a ):
        return [k for k, v in collections.Counter(_a ).items() if v > 1]
    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}" )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = '''base imports''' if key == '''none''' else f"{key} backend"
errors.append(f"Differences for {name}:" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f" {a} in TYPE_HINT but not in _import_structure." )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f" {a} in _import_structure but not in TYPE_HINT." )
return errors
def check_all_inits( ):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root , '''__init__.py''' )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append('''\n'''.join(errors ) )
    if len(failures ) > 0:
        raise ValueError('''\n\n'''.join(failures ) )
def get_transformers_submodules( ):
snake_case_ : List[str] = []
for path, directories, files in os.walk(_a ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(_a )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(_a ) / folder).glob('''*.py''' ) ) ) == 0:
continue
snake_case_ : Optional[int] = str((Path(_a ) / folder).relative_to(_a ) )
snake_case_ : Optional[Any] = short_path.replace(os.path.sep , '''.''' )
submodules.append(_a )
for fname in files:
if fname == "__init__.py":
continue
snake_case_ : Optional[int] = str((Path(_a ) / fname).relative_to(_a ) )
snake_case_ : Union[str, Any] = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(_a )
return submodules
IGNORE_SUBMODULES = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def check_submodules( ):
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
snake_case_ : List[Any] = direct_transformers_import(_a )
snake_case_ : Optional[int] = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
# (potentiall re-) add them.
with open(os.path.join(_a , '''__init__.py''' ) , '''r''' ) as f:
snake_case_ : List[str] = f.read()
import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , _a ) ) )
snake_case_ : Union[str, Any] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(_a ) > 0:
snake_case_ : Dict = '''\n'''.join(f"- {module}" for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registed in the main init of Transformers:\n'''
f"{list_of_modules}\n"
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 155 |
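# Spot checks for `find_backend` above: only guarded
# "if not is_xxx_available()" lines match, and the backend name is pulled
# out of the availability check (multiple matches on one line would be
# sorted and joined with "_and_").
assert find_backend("    if not is_torch_available():") == "torch"
assert find_backend("import torch") is None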
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _UpperCAmelCase ( lowerCAmelCase__):
def __init__( self : List[Any] , lowercase_ : NestedDataStructureLike[PathLike] , lowercase_ : Optional[NamedSplit] = None , lowercase_ : Optional[Features] = None , lowercase_ : str = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : Optional[int] = None , **lowercase_ : Optional[Any] , ):
super().__init__(
lowercase_ , split=lowercase_ , features=lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ , streaming=lowercase_ , num_proc=lowercase_ , **lowercase_ , )
snake_case_ : List[Any] = path_or_paths if isinstance(lowercase_ , lowercase_ ) else {self.split: path_or_paths}
snake_case_ : str = Text(
cache_dir=lowercase_ , data_files=lowercase_ , features=lowercase_ , **lowercase_ , )
def _snake_case ( self : Any ):
# Build iterable dataset
if self.streaming:
snake_case_ : Union[str, Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
snake_case_ : List[Any] = None
snake_case_ : Optional[Any] = None
snake_case_ : str = None
snake_case_ : Optional[int] = None
self.builder.download_and_prepare(
download_config=lowercase_ , download_mode=lowercase_ , verification_mode=lowercase_ , base_path=lowercase_ , num_proc=self.num_proc , )
snake_case_ : Union[str, Any] = self.builder.as_dataset(
split=self.split , verification_mode=lowercase_ , in_memory=self.keep_in_memory )
return dataset
| 155 | 1 |
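# The reader above is what backs `load_dataset("text", ...)`; a typical call
# through the public datasets API (the file path is illustrative):
from datasets import load_dataset

dataset = load_dataset("text", data_files={"train": "my_corpus.txt"})
print(dataset["train"][0]["text"])  # each line of the file becomes one row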
"""simple docstring"""
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)
class ParallelBackendConfig:
    '''simple docstring'''
    backend_name = None
@experimental
def __A ( a_ :str , a_ :Union[str, Any] , a_ :Any , a_ :str , a_ :Tuple , a_ :List[str] , a_ :Union[str, Any]) -> List[str]:
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
a_ , a_ , a_ , a_ , a_ , a_ , a_)
return _map_with_joblib(a_ , a_ , a_ , a_ , a_ , a_ , a_)
def __A ( a_ :Optional[int] , a_ :List[Any] , a_ :Any , a_ :List[Any] , a_ :str , a_ :Any , a_ :Any) -> str:
__a : List[str] = num_proc if num_proc <= len(a_) else len(a_)
__a : Dict = [] # We organize the splits ourselve (contiguous splits)
for index in range(a_):
__a : Dict = len(a_) // num_proc
__a : int = len(a_) % num_proc
__a : List[Any] = div * index + min(a_ , a_)
__a : Dict = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))
if len(a_) != sum(len(i[1]) for i in split_kwds):
raise ValueError(
F"""Error dividing inputs iterable among processes. """
F"""Total number of objects {len(a_)}, """
F"""length: {sum(len(i[1]) for i in split_kwds)}""")
logger.info(
F"""Spawning {num_proc} processes for {len(a_)} objects in slices of {[len(i[1]) for i in split_kwds]}""")
__a , __a : Optional[int] = None, None
if not disable_tqdm:
__a , __a : Tuple = (RLock(),), tqdm.set_lock
with Pool(a_ , initargs=a_ , initializer=a_) as pool:
__a : List[Any] = pool.map(a_ , a_)
logger.info(F"""Finished {num_proc} processes""")
__a : Any = [obj for proc_res in mapped for obj in proc_res]
logger.info(F"""Unpacked {len(a_)} objects""")
return mapped
def __A ( a_ :Optional[int] , a_ :Any , a_ :List[Any] , a_ :Optional[int] , a_ :Any , a_ :Any , a_ :Any) -> Optional[Any]:
# progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
# and it requires monkey-patching joblib internal classes which is subject to change
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=a_):
return joblib.Parallel()(
joblib.delayed(a_)((function, obj, types, None, True, None)) for obj in iterable)
@experimental
@contextlib.contextmanager
def __A ( backend_name :str) -> List[Any]:
    ParallelBackendConfig.backend_name = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
        ParallelBackendConfig.backend_name = None
| 160 |
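# The chunking arithmetic in `_map_with_multiprocessing_pool` above gives
# each worker a contiguous slice, with slice sizes differing by at most one.
# A standalone sketch of just that split:
def contiguous_splits(items, num_proc):
    div, mod = divmod(len(items), num_proc)
    splits = []
    for index in range(num_proc):
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        splits.append(items[start:end])
    return splits

chunks = contiguous_splits(list(range(10)), 3)
assert chunks == [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert sum(chunks, []) == list(range(10))  # nothing lost, order preserved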
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
A = logging.get_logger(__name__)
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
warnings.warn(
'''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use DeformableDetrImageProcessor instead.''' , _UpperCAmelCase , )
        super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 160 | 1 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def lowerCamelCase__ (file, sock ):
# ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    values = iter([1, None] )  # read() yields one chunk, then EOF
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(values )
# ===== invoke =====
send_file(filename="mytext.txt", testing=__lowerCamelCase )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
| 325 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class lowerCAmelCase__:
'''simple docstring'''
__snake_case = BlenderbotSmallConfig
__snake_case = {}
__snake_case = 'gelu'
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase=9_9 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=4 , __lowerCamelCase=3_7 , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=2_0 , __lowerCamelCase=2 , __lowerCamelCase=1 , __lowerCamelCase=0 , ) -> List[str]:
_SCREAMING_SNAKE_CASE : int = parent
_SCREAMING_SNAKE_CASE : Tuple = batch_size
_SCREAMING_SNAKE_CASE : Dict = seq_length
_SCREAMING_SNAKE_CASE : List[str] = is_training
_SCREAMING_SNAKE_CASE : List[str] = use_labels
_SCREAMING_SNAKE_CASE : Dict = vocab_size
_SCREAMING_SNAKE_CASE : Dict = hidden_size
_SCREAMING_SNAKE_CASE : int = num_hidden_layers
_SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
_SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
_SCREAMING_SNAKE_CASE : Optional[int] = eos_token_id
_SCREAMING_SNAKE_CASE : Optional[Any] = pad_token_id
_SCREAMING_SNAKE_CASE : List[str] = bos_token_id
def UpperCamelCase_ ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_SCREAMING_SNAKE_CASE : List[str] = tf.concat([input_ids, eos_tensor] , axis=1 )
_SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE : str = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_SCREAMING_SNAKE_CASE : List[Any] = prepare_blenderbot_small_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return config, inputs_dict
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Tuple:
_SCREAMING_SNAKE_CASE : Any = TFBlenderbotSmallModel(config=__lowerCamelCase ).get_decoder()
_SCREAMING_SNAKE_CASE : Dict = inputs_dict["input_ids"]
_SCREAMING_SNAKE_CASE : List[Any] = input_ids[:1, :]
_SCREAMING_SNAKE_CASE : Optional[Any] = inputs_dict["attention_mask"][:1, :]
_SCREAMING_SNAKE_CASE : List[str] = inputs_dict["head_mask"]
_SCREAMING_SNAKE_CASE : int = 1
# first forward pass
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , head_mask=__lowerCamelCase , use_cache=__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_SCREAMING_SNAKE_CASE : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
_SCREAMING_SNAKE_CASE : Optional[int] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_SCREAMING_SNAKE_CASE : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
_SCREAMING_SNAKE_CASE : int = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_SCREAMING_SNAKE_CASE : List[str] = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_SCREAMING_SNAKE_CASE : str = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_SCREAMING_SNAKE_CASE : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx]
_SCREAMING_SNAKE_CASE : Dict = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__lowerCamelCase , __lowerCamelCase , rtol=1E-3 )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=None, ):
if attention_mask is None:
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.cast(tf.math.not_equal(__lowerCamelCase, config.pad_token_id ), tf.inta )
if decoder_attention_mask is None:
_SCREAMING_SNAKE_CASE : List[str] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ),
], axis=-1, )
if head_mask is None:
_SCREAMING_SNAKE_CASE : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_SCREAMING_SNAKE_CASE : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_SCREAMING_SNAKE_CASE : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowerCAmelCase__( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
__snake_case = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
__snake_case = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
__snake_case = True
__snake_case = False
__snake_case = False
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = TFBlenderbotSmallModelTester(self )
_SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Tuple:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__lowerCamelCase )
@require_tokenizers
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
__snake_case = [
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
__snake_case = 'facebook/blenderbot_small-90M'
@cached_property
def UpperCamelCase_ ( self ) -> List[Any]:
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
@cached_property
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(self.src_text , return_tensors="tf" )
_SCREAMING_SNAKE_CASE : Dict = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__lowerCamelCase )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
        )
| 325 | 1 |
"""simple docstring"""
MOD_ADLER = 6_55_21
def _A ( plain_text : str) -> int:
    '''simple docstring'''
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
return (b << 16) | a
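# Quick sanity check of the checksum above (named `_A` in this listing)
# against the reference implementation in zlib; "Wikipedia" is the usual
# worked example for Adler-32.
import zlib
assert _A("Wikipedia") == zlib.adler32(b"Wikipedia") == 0x11E60398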
| 17 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mt5'''] = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_mt5'''] = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_mt5'''] = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()['''__file__'''],
_import_structure,
extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast},
module_spec=__spec__,
)
| 69 | 0 |
import re
def is_sri_lankan_phone_number( A : str ) -> bool:
"""simple docstring"""
a = re.compile(
r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
    return bool(re.search(a , A ) )
if __name__ == "__main__":
_lowercase: Tuple = "0094702343221"
print(is_sri_lankan_phone_number(phone))
| 357 |
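# Spot checks for the validator above: the prefix may be 0, 94, +94 or 0094,
# followed by 7 and one of 0/1/2/4/5/6/7/8, an optional "-" or " " separator,
# and seven more digits.
assert is_sri_lankan_phone_number("0094702343221")
assert is_sri_lankan_phone_number("+94771234567")
assert is_sri_lankan_phone_number("077-1234567")
assert not is_sri_lankan_phone_number("0731234567")  # 3 is not an accepted second digit
assert not is_sri_lankan_phone_number("07712345")    # too short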
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
"tokenizer_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/rembert": 256,
}
_lowercase: Dict = "▁"
class _lowercase ( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer
def __init__(self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_="[CLS]" , lowerCamelCase_="[SEP]" , lowerCamelCase_="<unk>" , lowerCamelCase_="[SEP]" , lowerCamelCase_="<pad>" , lowerCamelCase_="[CLS]" , lowerCamelCase_="[MASK]" , **lowerCamelCase_ , ):
"""simple docstring"""
a = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token
super().__init__(
lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , do_lower_case=lowerCamelCase_ , remove_space=lowerCamelCase_ , keep_accents=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , **lowerCamelCase_ , )
a = do_lower_case
a = remove_space
a = keep_accents
a = vocab_file
a = False if not self.vocab_file else True
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = None ):
"""simple docstring"""
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = None ):
"""simple docstring"""
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = None ):
"""simple docstring"""
if not os.path.isdir(lowerCamelCase_ ):
logger.error("Vocabulary path ({}) should be a directory".format(lowerCamelCase_ ) )
return
a = os.path.join(
lowerCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ):
copyfile(self.vocab_file , lowerCamelCase_ )
return (out_vocab_file,)
| 71 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__a = logging.get_logger(__name__)
class UpperCAmelCase_ ( _lowerCAmelCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , *snake_case_ : Any , **snake_case_ : List[Any] ):
warnings.warn(
"""The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use VideoMAEImageProcessor instead.""" , _lowercase , )
super().__init__(*_lowercase , **_lowercase )
| 35 |
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def lowercase__ ( snake_case_ :Optional[int] ):
return EnvironmentCommand()
def lowercase__ ( snake_case_ :List[str] ):
return EnvironmentCommand(args.accelerate_config_file )
class _UpperCAmelCase ( _lowerCAmelCase ):
@staticmethod
def a ( _lowercase : ArgumentParser ):
__UpperCAmelCase = parser.add_parser('''env''' )
download_parser.set_defaults(func=_lowercase )
download_parser.add_argument(
'''--accelerate-config_file''' , default=_lowercase , help='''The accelerate config file to use for the default values in the launching script.''' , )
download_parser.set_defaults(func=_lowercase )
def __init__( self : Optional[int] , _lowercase : str , *_lowercase : Tuple ):
__UpperCAmelCase = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 332 | 0 |
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup

headers = {
'''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Download images from a Google image search; return the number downloaded."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode("unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode("unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index


if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
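
# Note (added): Google changes this results markup often; when the GRID_STATE0 regex
# above stops matching, the function deliberately returns 0 instead of raising.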
| 368 |
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426_880 * Decimal(10_005).sqrt()
    exponential_term = 1
    linear_term = 13_591_409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545_140_134
        exponential_term *= -262_537_412_640_768_000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
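    # sanity check (added): each Chudnovsky term contributes ~14 digits, hence the
    # ceil(precision / 14) iteration count above
    assert pi(10).startswith("3.1415926")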
| 142 | 0 |
"""simple docstring"""
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
        extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
        extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
"""insecure_tar_file, error_log""" , [("""tar_file_with_dot_dot""", """illegal path"""), ("""tar_file_with_sym_link""", """Symlink""")] , )
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
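
# Note (added): the byte string above embeds a "PK\x05\x06" end-of-central-directory
# record inside valid PNG data, which is exactly what fools zipfile.is_zipfile().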
| 155 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)

        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()

        output["model_type"] = self.__class__.model_type
        return output
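
# Illustrative usage (added): the defaults give the plain (non-hybrid) DPT layout;
# pass is_hybrid=True to attach the BiT backbone configured above.
# config = DPTConfig()
# assert config.hidden_size == 768 and config.backbone_config is None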
| 155 | 1 |
def find_min(arr):
    """Partition `arr` into two subsets and return the minimum difference of their sums."""
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True  # a sum of 0 is always reachable (empty subset)
    for j in range(1, s + 1):
        dp[0][j] = False  # nothing positive can be built from zero elements
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    diff = s
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
| 130 |
import base64


def base85_encode(string: str) -> bytes:
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
import doctest
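    # round-trip check (added): Ascii85 encoding is reversible
    assert base85_decode(base85_encode("Hello World!")) == "Hello World!"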
    doctest.testmod()
| 130 | 1 |
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image by `level` (positive increases contrast)."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)
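
# quick check (added): level = 0 gives factor (259*255)/(255*259) == 1.0, an identity mapping
assert (259 * (0 + 255)) / (255 * (259 - 0)) == 1.0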
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
| 325 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=26_77_35,
        cutoffs=[2_00_00, 4_00_00, 20_00_00],
        d_model=10_24,
        d_embed=10_24,
        n_head=16,
        d_head=64,
        d_inner=40_96,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=16_00,
        clamp_len=10_00,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 325 | 1 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get the hash of the whitespace-stripped content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Compute mean and max line length of the content field."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Compute the fraction of alphanumeric characters in the content field."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check whether the example's hash is still in `uniques` and consume it."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if the first few lines mark the file as auto-generated."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if the file is likely a configuration or test file."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if the file contains no function or loop keywords."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if the file uses the assignment operator fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute the character-to-token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter the dataset with heuristics."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with g-zip and remove the original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
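
# Note (added): char_token_ratio flags files that tokenize poorly (few characters per
# token, e.g. minified or binary-ish content); rows with ratio < args.min_token_ratio
# are dropped by `filter` above.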
| 365 |
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Return the least number of perfect squares that sum to `number`."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
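    # illustrative check (added): 13 = 4 + 9 needs two squares; by Lagrange's
    # four-square theorem the answer never exceeds 4
    assert minimum_squares_to_represent_a_number(13) == 2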
| 216 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
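
# Note (added): the lazy-module pattern above keeps `import ...mvp` cheap; torch and
# the tokenizers backend are only imported when a listed attribute is first accessed.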
| 56 |
import argparse
import hashlib
import io
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = ".") -> bytes:
    # `root` defaults to the current directory (default added so the one-argument call below works)
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # note (added): _download returns raw bytes, so wrap them for torch.load
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 71 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (RobertaTokenizer, RobertaTokenizerFast))

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (RobertaTokenizer, RobertaTokenizerFast))

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
| 371 |

"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 312 | 0 |
'''simple docstring'''
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])

IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])

IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])

TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])

TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])

IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])

IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])

CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])

CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])

UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])

UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])

TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])

TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
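
# Note (added): the constant names above are reconstructed from diffusers'
# pipeline_params module; each *_PARAMS frozenset lists the expected __call__
# arguments for a pipeline family, and each *_BATCH_PARAMS the arguments that are
# batched in the batched tests.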
| 37 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    """Scrape the three headline counters from worldometers."""
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
| 142 | 0 |
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y the target vector
def logistic_reg(alpha, x, y, max_iterations=70_000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")

    plt.legend()
    plt.show()
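
# Note (added): the 0.5 contour drawn above is the decision boundary; with a sigmoid
# model, p = 0.5 exactly where theta . x = 0.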
| 41 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
| 41 | 1 |
from functools import lru_cache
@lru_cache
def factorial(num):
    """Return num!; not defined for negative numbers."""
    if num < 0:
        raise ValueError("Number should not be negative.")

    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
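    # quick check (added): 5! = 120, and lru_cache serves repeated calls from the memo
    assert factorial(5) == 120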
| 130 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(hidden_sizes, name, config, save_directory, push_to_hub=True):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        from_model_logits = from_model(x)
        our_model_logits = our_model(x).logits

    assert torch.allclose(from_model_logits, our_model_logits), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1_000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
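# Usage sketch (script filename assumed): convert one checkpoint, or omit
# --model_name to convert every entry in names_to_config.
#   python convert_levit_checkpoint.py --model_name levit-128S --pytorch_dump_folder_path levit-dump-folder/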
| 130 | 1 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
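# Running the tests above (file path assumed; requires pyspark, joblibspark and dill>0.3.2):
#   pytest tests/test_parallel.py -k "parallel_backend"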
| 88 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize each prompt once and yield it `n_copies` times, so every task is generated multiple times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` that checks whether every generation in the batch has hit an end-of-function string."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of the generated code (everything from the last EOF string onward)."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate `batch_size` completions per task across processes and group them by task id."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
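# Usage sketch (flag names follow the HumanEvalArguments fields read above; values illustrative):
#   accelerate launch human_eval.py --model_ckpt <causal-lm-checkpoint> --num_tasks 4 \
#       --n_samples 20 --batch_size 10 --HF_ALLOW_CODE_EVAL 1 --output_file eval_results.json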
| 88 | 1 |