| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 81–54k) | int64 (0–721) | string (lengths 91–41.9k) | int64 (0–699) | int64 (0–1) |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class _lowercase :
'''simple docstring'''
def __init__( self )-> int:
UpperCAmelCase__ : Any = {}
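# the dict created here is the adjacency list: each vertex maps to a list of [weight, neighbour] pairs filled in by add_pair below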
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=1 )-> List[str]:
if self.graph.get(__UpperCamelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
UpperCAmelCase__ : Optional[Any] = [[w, v]]
if not self.graph.get(__UpperCamelCase ):
UpperCAmelCase__ : Any = []
def lowerCAmelCase__ ( self )-> Dict:
return list(self.graph )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> List[Any]:
if self.graph.get(__UpperCamelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase=-2 , __UpperCamelCase=-1 )-> Dict:
if s == d:
return []
UpperCAmelCase__ : Dict = []
UpperCAmelCase__ : Tuple = []
if s == -2:
UpperCAmelCase__ : Union[str, Any] = list(self.graph )[0]
stack.append(__UpperCamelCase )
visited.append(__UpperCamelCase )
UpperCAmelCase__ : Any = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase__ : Tuple = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__UpperCamelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase__ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__UpperCamelCase ) != 0:
UpperCAmelCase__ : Optional[int] = stack[len(__UpperCamelCase ) - 1]
else:
UpperCAmelCase__ : Any = ss
# check if we have reached the starting point
if len(__UpperCamelCase ) == 0:
return visited
def lowerCAmelCase__ ( self , __UpperCamelCase=-1 )-> str:
if c == -1:
UpperCAmelCase__ : Tuple = floor(random() * 1_00_00 ) + 10
for i in range(__UpperCamelCase ):
# every vertex gets a random number of edges (between 1 and 102 with these constants)
for _ in range(floor(random() * 1_02 ) + 1 ):
UpperCAmelCase__ : str = floor(random() * c ) + 1
if n != i:
self.add_pair(__UpperCamelCase , __UpperCamelCase , 1 )
def lowerCAmelCase__ ( self , __UpperCamelCase=-2 )-> Tuple:
UpperCAmelCase__ : int = deque()
UpperCAmelCase__ : Dict = []
if s == -2:
UpperCAmelCase__ : int = list(self.graph )[0]
d.append(__UpperCamelCase )
visited.append(__UpperCamelCase )
while d:
UpperCAmelCase__ : Union[str, Any] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Optional[int]:
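# in-degree of u: count, across the whole graph, the edges that end at u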
UpperCAmelCase__ : Any = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
return len(self.graph[u] )
def lowerCAmelCase__ ( self , __UpperCamelCase=-2 )-> Dict:
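# iterative DFS-based topological sort: a vertex is appended to sorted_nodes once all of its outgoing edges have been explored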
UpperCAmelCase__ : Any = []
UpperCAmelCase__ : str = []
if s == -2:
UpperCAmelCase__ : int = list(self.graph )[0]
stack.append(__UpperCamelCase )
visited.append(__UpperCamelCase )
UpperCAmelCase__ : int = s
UpperCAmelCase__ : Union[str, Any] = []
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase__ : List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase__ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(__UpperCamelCase ) != 0:
UpperCAmelCase__ : Optional[int] = stack[len(__UpperCamelCase ) - 1]
else:
UpperCAmelCase__ : Dict = ss
# check if we have reached the starting point
if len(__UpperCamelCase ) == 0:
return sorted_nodes
def lowerCAmelCase__ ( self )-> int:
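# DFS that looks for back edges; the vertices found to lie on a cycle are collected in anticipating_nodes and returned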
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : Union[str, Any] = list(self.graph )[0]
stack.append(__UpperCamelCase )
visited.append(__UpperCamelCase )
UpperCAmelCase__ : Any = -2
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : Any = s
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Tuple = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase__ : str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCAmelCase__ : str = len(__UpperCamelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase__ : Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCAmelCase__ : Dict = True
if len(__UpperCamelCase ) != 0:
UpperCAmelCase__ : Optional[int] = stack[len(__UpperCamelCase ) - 1]
else:
UpperCAmelCase__ : Tuple = False
indirect_parents.append(__UpperCamelCase )
UpperCAmelCase__ : Any = s
UpperCAmelCase__ : List[str] = ss
# check if we have reached the starting point
if len(__UpperCamelCase ) == 0:
return list(__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
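# DFS-based cycle check: returns True as soon as a back edge is detected, otherwise False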
UpperCAmelCase__ : str = []
UpperCAmelCase__ : List[Any] = []
UpperCAmelCase__ : Optional[int] = list(self.graph )[0]
stack.append(__UpperCamelCase )
visited.append(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = -2
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : str = s
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : Optional[Any] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase__ : str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCAmelCase__ : Optional[int] = len(__UpperCamelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase__ : int = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCAmelCase__ : Optional[Any] = True
if len(__UpperCamelCase ) != 0:
UpperCAmelCase__ : str = stack[len(__UpperCamelCase ) - 1]
else:
UpperCAmelCase__ : List[Any] = False
indirect_parents.append(__UpperCamelCase )
UpperCAmelCase__ : List[str] = s
UpperCAmelCase__ : str = ss
# check if we have reached the starting point
if len(__UpperCamelCase ) == 0:
return False
def lowerCAmelCase__ ( self , __UpperCamelCase=-2 , __UpperCamelCase=-1 )-> List[str]:
UpperCAmelCase__ : Dict = time()
self.dfs(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = time()
return end - begin
def lowerCAmelCase__ ( self , __UpperCamelCase=-2 )-> Optional[int]:
UpperCAmelCase__ : Optional[Any] = time()
self.bfs(__UpperCamelCase )
UpperCAmelCase__ : Tuple = time()
return end - begin
class _lowercase :
'''simple docstring'''
def __init__( self )-> Union[str, Any]:
UpperCAmelCase__ : List[Any] = {}
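# adjacency list again, but for an undirected graph: add_pair below stores every edge under both of its endpoints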
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=1 )-> Union[str, Any]:
# check if vertex u already exists
if self.graph.get(__UpperCamelCase ):
# if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
UpperCAmelCase__ : int = [[w, v]]
# add the other way
if self.graph.get(__UpperCamelCase ):
# if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if v does not exist
UpperCAmelCase__ : Optional[int] = [[w, u]]
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> int:
if self.graph.get(__UpperCamelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__UpperCamelCase )
# the other way round
if self.graph.get(__UpperCamelCase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase=-2 , __UpperCamelCase=-1 )-> Optional[int]:
if s == d:
return []
UpperCAmelCase__ : Dict = []
UpperCAmelCase__ : int = []
if s == -2:
UpperCAmelCase__ : Union[str, Any] = list(self.graph )[0]
stack.append(__UpperCamelCase )
visited.append(__UpperCamelCase )
UpperCAmelCase__ : str = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase__ : Union[str, Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__UpperCamelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase__ : List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__UpperCamelCase ) != 0:
UpperCAmelCase__ : List[str] = stack[len(__UpperCamelCase ) - 1]
else:
UpperCAmelCase__ : Dict = ss
# check if we have reached the starting point
if len(__UpperCamelCase ) == 0:
return visited
def lowerCAmelCase__ ( self , __UpperCamelCase=-1 )-> Optional[Any]:
if c == -1:
UpperCAmelCase__ : Dict = floor(random() * 1_00_00 ) + 10
for i in range(__UpperCamelCase ):
# every vertex gets a random number of edges (between 1 and 102 with these constants)
for _ in range(floor(random() * 1_02 ) + 1 ):
UpperCAmelCase__ : int = floor(random() * c ) + 1
if n != i:
self.add_pair(__UpperCamelCase , __UpperCamelCase , 1 )
def lowerCAmelCase__ ( self , __UpperCamelCase=-2 )-> Any:
UpperCAmelCase__ : int = deque()
UpperCAmelCase__ : str = []
if s == -2:
UpperCAmelCase__ : List[Any] = list(self.graph )[0]
d.append(__UpperCamelCase )
visited.append(__UpperCamelCase )
while d:
UpperCAmelCase__ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowerCAmelCase__ ( self , __UpperCamelCase )-> str:
return len(self.graph[u] )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : List[str] = []
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : str = list(self.graph )[0]
stack.append(__UpperCamelCase )
visited.append(__UpperCamelCase )
UpperCAmelCase__ : Any = -2
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : int = s
UpperCAmelCase__ : int = False
UpperCAmelCase__ : List[Any] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase__ : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCAmelCase__ : str = len(__UpperCamelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase__ : Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCAmelCase__ : str = True
if len(__UpperCamelCase ) != 0:
UpperCAmelCase__ : int = stack[len(__UpperCamelCase ) - 1]
else:
UpperCAmelCase__ : Optional[int] = False
indirect_parents.append(__UpperCamelCase )
UpperCAmelCase__ : Any = s
UpperCAmelCase__ : Tuple = ss
# check if we have reached the starting point
if len(__UpperCamelCase ) == 0:
return list(__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : List[Any] = []
UpperCAmelCase__ : int = []
UpperCAmelCase__ : Tuple = list(self.graph )[0]
stack.append(__UpperCamelCase )
visited.append(__UpperCamelCase )
UpperCAmelCase__ : str = -2
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : int = s
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : int = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCAmelCase__ : int = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCAmelCase__ : int = len(__UpperCamelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCAmelCase__ : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCAmelCase__ : Any = True
if len(__UpperCamelCase ) != 0:
UpperCAmelCase__ : Any = stack[len(__UpperCamelCase ) - 1]
else:
UpperCAmelCase__ : Union[str, Any] = False
indirect_parents.append(__UpperCamelCase )
UpperCAmelCase__ : List[str] = s
UpperCAmelCase__ : List[str] = ss
# check if we have reached the starting point
if len(__UpperCamelCase ) == 0:
return False
def lowerCAmelCase__ ( self )-> str:
return list(self.graph )
def lowerCAmelCase__ ( self , __UpperCamelCase=-2 , __UpperCamelCase=-1 )-> int:
UpperCAmelCase__ : str = time()
self.dfs(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = time()
return end - begin
def lowerCAmelCase__ ( self , __UpperCamelCase=-2 )-> Tuple:
UpperCAmelCase__ : int = time()
self.bfs(__UpperCamelCase )
UpperCAmelCase__ : str = time()
return end - begin
| 660 |
"""simple docstring"""
def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ):
'''simple docstring'''
_validate_point(lowerCAmelCase )
_validate_point(lowerCAmelCase )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(lowerCAmelCase , lowerCAmelCase ) ) )
def a__ ( lowerCAmelCase : list[float] ):
'''simple docstring'''
if point:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
for item in point:
if not isinstance(lowerCAmelCase , (int, float) ):
UpperCAmelCase__ : Tuple = (
"Expected a list of numbers as input, found "
F"{type(lowerCAmelCase ).__name__}"
)
raise TypeError(lowerCAmelCase )
else:
UpperCAmelCase__ : Dict = F"Expected a list of numbers as input, found {type(lowerCAmelCase ).__name__}"
raise TypeError(lowerCAmelCase )
else:
raise ValueError("Missing an input" )
def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ):
'''simple docstring'''
_validate_point(lowerCAmelCase )
_validate_point(lowerCAmelCase )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(lowerCAmelCase , lowerCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 1 |
"""simple docstring"""
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : List[str] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__UpperCamelCase , "embed_dim" ) )
self.parent.assertTrue(hasattr(__UpperCamelCase , "num_heads" ) )
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=64 , __UpperCamelCase=3 , __UpperCamelCase=[16, 48, 96] , __UpperCamelCase=[1, 3, 6] , __UpperCamelCase=[1, 2, 10] , __UpperCamelCase=[7, 3, 3] , __UpperCamelCase=[4, 2, 2] , __UpperCamelCase=[2, 1, 1] , __UpperCamelCase=[2, 2, 2] , __UpperCamelCase=[False, False, True] , __UpperCamelCase=[0.0, 0.0, 0.0] , __UpperCamelCase=0.02 , __UpperCamelCase=1E-12 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=2 , )-> List[Any]:
UpperCAmelCase__ : Tuple = parent
UpperCAmelCase__ : Any = batch_size
UpperCAmelCase__ : Optional[Any] = image_size
UpperCAmelCase__ : Tuple = patch_sizes
UpperCAmelCase__ : List[Any] = patch_stride
UpperCAmelCase__ : Union[str, Any] = patch_padding
UpperCAmelCase__ : int = is_training
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : Union[str, Any] = num_labels
UpperCAmelCase__ : List[str] = num_channels
UpperCAmelCase__ : Dict = embed_dim
UpperCAmelCase__ : Union[str, Any] = num_heads
UpperCAmelCase__ : Union[str, Any] = stride_kv
UpperCAmelCase__ : Tuple = depth
UpperCAmelCase__ : Any = cls_token
UpperCAmelCase__ : Optional[int] = attention_drop_rate
UpperCAmelCase__ : int = initializer_range
UpperCAmelCase__ : List[Any] = layer_norm_eps
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : int = None
if self.use_labels:
UpperCAmelCase__ : str = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self )-> Any:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[Any]:
UpperCAmelCase__ : Any = CvtModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : Optional[int] = model(__UpperCamelCase )
UpperCAmelCase__ : Any = (self.image_size, self.image_size)
UpperCAmelCase__ , UpperCAmelCase__ : str = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
UpperCAmelCase__ : Dict = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
UpperCAmelCase__ : Optional[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
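# with the defaults above (image_size=64, patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1]) the spatial size shrinks 64 -> 16 -> 8 -> 4, so the expected map is 4 x 4 with embed_dim[-1] = 96 channels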
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> int:
UpperCAmelCase__ : str = self.num_labels
UpperCAmelCase__ : List[Any] = CvtForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : Tuple = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ : int = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Tuple = config_and_inputs
UpperCAmelCase__ : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
_A = (
{'feature-extraction': CvtModel, 'image-classification': CvtForImageClassification}
if is_torch_available()
else {}
)
_A = False
_A = False
_A = False
_A = False
_A = False
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Dict = CvtModelTester(self )
UpperCAmelCase__ : str = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def lowerCAmelCase__ ( self )-> List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase__ ( self )-> Union[str, Any]:
return
@unittest.skip(reason="Cvt does not output attentions" )
def lowerCAmelCase__ ( self )-> Optional[int]:
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
def lowerCAmelCase__ ( self )-> Optional[Any]:
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
def lowerCAmelCase__ ( self )-> Optional[int]:
pass
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Any = model_class(__UpperCamelCase )
UpperCAmelCase__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Dict = [*signature.parameters.keys()]
UpperCAmelCase__ : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> str:
def check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : Tuple = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : str = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase__ : int = outputs.hidden_states
UpperCAmelCase__ : Any = len(self.model_tester.depth )
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : str = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self )-> int:
pass
@slow
def lowerCAmelCase__ ( self )-> Optional[int]:
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : str = CvtModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self )-> List[Any]:
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Tuple = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__UpperCamelCase )
UpperCAmelCase__ : Dict = self.default_image_processor
UpperCAmelCase__ : Any = prepare_img()
UpperCAmelCase__ : Optional[int] = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Tuple = model(**__UpperCamelCase )
# verify the logits
UpperCAmelCase__ : Optional[Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
UpperCAmelCase__ : Dict = torch.tensor([0.9285, 0.9015, -0.3150] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
| 660 |
"""simple docstring"""
import math
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1, so only i and i + 2 need to be tested for i = 5, 11, 17, ...
for i in range(5 , int(math.sqrt(lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a__ ( lowerCAmelCase : int = 1_0001 ):
'''simple docstring'''
try:
UpperCAmelCase__ : List[str] = int(lowerCAmelCase )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
UpperCAmelCase__ : list[int] = []
UpperCAmelCase__ : str = 2
while len(lowerCAmelCase ) < nth:
if is_prime(lowerCAmelCase ):
primes.append(lowerCAmelCase )
num += 1
else:
num += 1
return primes[len(lowerCAmelCase ) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'dandelin/vilt-b32-finetuned-vqa'
_A = (
'This is a tool that answers a question about an image. It takes an input named `image` which should be the '
'image containing the information, as well as a `question` which should be the question in English. It '
'returns a text that is the answer to the question.'
)
_A = 'image_qa'
_A = AutoProcessor
_A = AutoModelForVisualQuestionAnswering
_A = ['image', 'text']
_A = ['text']
def __init__( self , *__UpperCamelCase , **__UpperCamelCase )-> List[str]:
requires_backends(self , ["vision"] )
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> Any:
return self.pre_processor(__UpperCamelCase , __UpperCamelCase , return_tensors="pt" )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Tuple:
with torch.no_grad():
return self.model(**__UpperCamelCase ).logits
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[Any]:
UpperCAmelCase__ : Optional[int] = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
| 660 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=30 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=10 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=0.6 , __UpperCamelCase=None , )-> List[Any]:
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Optional[Any] = batch_size
UpperCAmelCase__ : Any = image_size
UpperCAmelCase__ : Dict = patch_size
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Union[str, Any] = is_training
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : int = mask_ratio
UpperCAmelCase__ : Tuple = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCAmelCase__ : int = (image_size // patch_size) ** 2
UpperCAmelCase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
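# e.g. with the defaults above (image_size=30, patch_size=2, mask_ratio=0.6): num_patches = 15**2 = 225 and seq_length = ceil(0.4 * 226) = 91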
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self )-> int:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = ViTMAEModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : List[Any] = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : Optional[int] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCAmelCase__ : List[str] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase__ : Dict = 1
UpperCAmelCase__ : str = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = config_and_inputs
UpperCAmelCase__ : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_A = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
_A = False
_A = False
_A = False
_A = False
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Any = ViTMAEModelTester(self )
UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def lowerCAmelCase__ ( self )-> int:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def lowerCAmelCase__ ( self )-> Dict:
pass
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class(__UpperCamelCase )
UpperCAmelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Dict = [*signature.parameters.keys()]
UpperCAmelCase__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Dict:
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCAmelCase__ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase__ : str = torch.from_numpy(__UpperCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase__ : Optional[Any] = pt_noise
super().check_pt_tf_models(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[Any] = outputs[0].cpu().numpy()
UpperCAmelCase__ : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = model_class.from_pretrained(__UpperCamelCase )
model.to(__UpperCamelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
# Make sure we don't have nans
UpperCAmelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__UpperCamelCase , 1E-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> List[str]:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Any:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Optional[Any]:
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def lowerCAmelCase__ ( self )-> List[Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
pass
@slow
def lowerCAmelCase__ ( self )-> Union[str, Any]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = ViTMAEModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self )-> List[Any]:
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self )-> Optional[int]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCAmelCase__ : Any = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__UpperCamelCase )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : Optional[Any] = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase__ : List[Any] = ViTMAEConfig()
UpperCAmelCase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase__ : Optional[int] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**__UpperCamelCase , noise=torch.from_numpy(__UpperCamelCase ).to(device=__UpperCamelCase ) )
# verify the logits
UpperCAmelCase__ : str = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__UpperCamelCase ) , atol=1E-4 ) )
| 660 | 1 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def a__ ( lowerCAmelCase : Dict ):
'''simple docstring'''
if isinstance(lowerCAmelCase , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class _lowercase :
'''simple docstring'''
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> Any:
pass
def lowerCAmelCase__ ( self )-> Union[str, Any]:
pass
def lowerCAmelCase__ ( self )-> Any:
pass
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Dict:
UpperCAmelCase__ : List[str] = np.abs((a - b) ).max()
self.assertLessEqual(__UpperCamelCase , __UpperCamelCase , F"Difference between torch and flax is {diff} (>= {tol})." )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , **__UpperCamelCase )-> List[Any]:
UpperCAmelCase__ : Dict = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : int = FlaxVisionTextDualEncoderModel(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , **__UpperCamelCase )-> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.get_vision_text_model(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : str = {"vision_model": vision_model, "text_model": text_model}
UpperCAmelCase__ : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , **__UpperCamelCase )-> List[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.get_vision_text_model(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : List[str] = {"vision_model": vision_model, "text_model": text_model}
UpperCAmelCase__ : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCamelCase )
UpperCAmelCase__ : Tuple = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase )
UpperCAmelCase__ : Dict = after_output[0]
UpperCAmelCase__ : Optional[int] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__UpperCamelCase , 1E-3 )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , **__UpperCamelCase )-> List[str]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.get_vision_text_model(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : str = {"vision_model": vision_model, "text_model": text_model}
UpperCAmelCase__ : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCamelCase )
UpperCAmelCase__ : Tuple = model(
input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase , output_attentions=__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = output.vision_model_output.attentions
self.assertEqual(len(__UpperCamelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase__ : List[Any] = to_atuple(vision_model.config.image_size )
UpperCAmelCase__ : int = to_atuple(vision_model.config.patch_size )
UpperCAmelCase__ : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
UpperCAmelCase__ : str = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
UpperCAmelCase__ : Tuple = output.text_model_output.attentions
self.assertEqual(len(__UpperCamelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> str:
pt_model.to(__UpperCamelCase )
pt_model.eval()
# prepare inputs
UpperCAmelCase__ : Union[str, Any] = inputs_dict
UpperCAmelCase__ : Tuple = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
UpperCAmelCase__ : int = pt_model(**__UpperCamelCase ).to_tuple()
UpperCAmelCase__ : List[Any] = fx_model(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__UpperCamelCase , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCamelCase , from_pt=__UpperCamelCase )
UpperCAmelCase__ : Any = fx_model_loaded(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__UpperCamelCase , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ : str = VisionTextDualEncoderModel.from_pretrained(__UpperCamelCase , from_flax=__UpperCamelCase )
pt_model_loaded.to(__UpperCamelCase )
pt_model_loaded.eval()
with torch.no_grad():
UpperCAmelCase__ : List[Any] = pt_model_loaded(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(__UpperCamelCase , pt_output_loaded.numpy() , 4E-2 )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> List[str]:
UpperCAmelCase__ : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Dict = VisionTextDualEncoderModel(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = FlaxVisionTextDualEncoderModel(__UpperCamelCase )
UpperCAmelCase__ : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __UpperCamelCase )
UpperCAmelCase__ : Tuple = fx_state
self.check_pt_flax_equivalence(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> List[str]:
UpperCAmelCase__ : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : List[str] = VisionTextDualEncoderModel(__UpperCamelCase )
UpperCAmelCase__ : str = FlaxVisionTextDualEncoderModel(__UpperCamelCase )
UpperCAmelCase__ : str = load_flax_weights_in_pytorch_model(__UpperCamelCase , fx_model.params )
self.check_pt_flax_equivalence(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
self.check_save_load(**__UpperCamelCase )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__UpperCamelCase )
@is_pt_flax_cross_test
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase__ : Optional[int] = config_inputs_dict.pop("vision_config" )
UpperCAmelCase__ : Optional[Any] = config_inputs_dict.pop("text_config" )
UpperCAmelCase__ : Union[str, Any] = config_inputs_dict
self.check_equivalence_pt_to_flax(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
self.check_equivalence_flax_to_pt(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@slow
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.get_pretrained_model_and_inputs()
UpperCAmelCase__ : str = model_a(**__UpperCamelCase )
UpperCAmelCase__ : List[str] = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = model_a(**__UpperCamelCase )
UpperCAmelCase__ : List[str] = after_outputs[0]
UpperCAmelCase__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__UpperCamelCase , 1E-5 )
@require_flax
class _lowercase ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=__UpperCamelCase , text_from_pt=__UpperCamelCase , )
UpperCAmelCase__ : Tuple = 13
UpperCAmelCase__ : Union[str, Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
UpperCAmelCase__ : List[str] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
UpperCAmelCase__ : int = random_attention_mask([batch_size, 4] )
UpperCAmelCase__ : Optional[int] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> Dict:
UpperCAmelCase__ : int = FlaxViTModel(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = FlaxBertModel(__UpperCamelCase )
return vision_model, text_model
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : Tuple = FlaxViTModelTester(self )
UpperCAmelCase__ : Tuple = FlaxBertModelTester(self )
UpperCAmelCase__ : List[Any] = vit_model_tester.prepare_config_and_inputs()
UpperCAmelCase__ : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ : Dict = vision_config_and_inputs
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class _lowercase ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=__UpperCamelCase , text_from_pt=__UpperCamelCase , )
UpperCAmelCase__ : Optional[int] = 13
UpperCAmelCase__ : List[str] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
UpperCAmelCase__ : Dict = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
UpperCAmelCase__ : Union[str, Any] = random_attention_mask([batch_size, 4] )
UpperCAmelCase__ : List[str] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> str:
UpperCAmelCase__ : Any = FlaxCLIPVisionModel(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = FlaxBertModel(__UpperCamelCase )
return vision_model, text_model
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : Tuple = FlaxCLIPVisionModelTester(self )
UpperCAmelCase__ : Tuple = FlaxBertModelTester(self )
UpperCAmelCase__ : int = clip_model_tester.prepare_config_and_inputs()
UpperCAmelCase__ : Tuple = bert_model_tester.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ : Any = vision_config_and_inputs
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0 )
UpperCAmelCase__ : List[Any] = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
UpperCAmelCase__ : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
UpperCAmelCase__ : List[Any] = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=__UpperCamelCase , padding=__UpperCamelCase , return_tensors="np" )
UpperCAmelCase__ : Dict = model(**__UpperCamelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
UpperCAmelCase__ : Optional[Any] = np.array([[1.228_4727, 0.310_4122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , __UpperCamelCase , atol=1E-3 ) )
| 660 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _lowercase :
'''simple docstring'''
_A = 42
# settable values
_A = 42
_A = 42
_A = None
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
return cls(common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase )
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
_A = [e.name for e in FlaxKarrasDiffusionSchedulers]
_A = 42
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return True
@register_to_config
def __init__( self , __UpperCamelCase = 10_00 , __UpperCamelCase = 0.0001 , __UpperCamelCase = 0.02 , __UpperCamelCase = "linear" , __UpperCamelCase = None , __UpperCamelCase = "fixed_small" , __UpperCamelCase = True , __UpperCamelCase = "epsilon" , __UpperCamelCase = jnp.floataa , )-> List[str]:
UpperCAmelCase__ : int = dtype
def lowerCAmelCase__ ( self , __UpperCamelCase = None )-> DDPMSchedulerState:
if common is None:
UpperCAmelCase__ : int = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype )
UpperCAmelCase__ : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None )-> jnp.ndarray:
return sample
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = () )-> DDPMSchedulerState:
UpperCAmelCase__ : Dict = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
UpperCAmelCase__ : Optional[int] = (jnp.arange(0 , __UpperCamelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase__ : Any = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
UpperCAmelCase__ : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCAmelCase__ : Dict = jnp.clip(__UpperCamelCase , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCAmelCase__ : Tuple = jnp.log(jnp.clip(__UpperCamelCase , a_min=1E-20 ) )
elif variance_type == "fixed_large":
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCAmelCase__ : List[str] = variance
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
UpperCAmelCase__ : Optional[int] = (predicted_variance + 1) / 2
UpperCAmelCase__ : Any = frac * max_log + (1 - frac) * min_log
return variance
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True , )-> Union[FlaxDDPMSchedulerOutput, Tuple]:
UpperCAmelCase__ : List[str] = timestep
if key is None:
UpperCAmelCase__ : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = jnp.split(__UpperCamelCase , sample.shape[1] , axis=1 )
else:
UpperCAmelCase__ : Optional[Any] = None
# 1. compute alphas, betas
UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCAmelCase__ : Union[str, Any] = 1 - alpha_prod_t
UpperCAmelCase__ : Tuple = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ : Any = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ : List[Any] = jnp.clip(__UpperCamelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase__ : List[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
UpperCAmelCase__ : Any = jax.random.split(__UpperCamelCase , num=1 )
UpperCAmelCase__ : int = jax.random.normal(__UpperCamelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__UpperCamelCase , __UpperCamelCase , predicted_variance=__UpperCamelCase ) ** 0.5) * noise
UpperCAmelCase__ : Dict = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCAmelCase__ : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__UpperCamelCase , state=__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return add_noise_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return get_velocity_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __len__( self )-> Tuple:
return self.config.num_train_timesteps
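# Rough usage sketch (illustrative assumption; method names below follow the upstream
# FlaxDDPMScheduler API rather than the obfuscated names in this file):
#   scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   for t in state.timesteps:
#       sample, state = scheduler.step(state, model_output, t, sample, return_dict=False)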
| 660 | 1 |
"""simple docstring"""
from __future__ import annotations
def a__ ( lowerCAmelCase : list[list[int]] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = len(lowerCAmelCase )
# We need to create solution object to save path.
UpperCAmelCase__ : Optional[Any] = [[0 for _ in range(lowerCAmelCase )] for _ in range(lowerCAmelCase )]
UpperCAmelCase__ : Any = run_maze(lowerCAmelCase , 0 , 0 , lowerCAmelCase )
if solved:
print("\n".join(str(lowerCAmelCase ) for row in solutions ) )
else:
print("No solution exists!" )
return solved
def a__ ( lowerCAmelCase : list[list[int]] , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : list[list[int]] ):
'''simple docstring'''
UpperCAmelCase__ : Any = len(lowerCAmelCase )
# Final check point.
if i == j == (size - 1):
UpperCAmelCase__ : int = 1
return True
UpperCAmelCase__ : Tuple = (not i < 0) and (not j < 0) # Check lower bounds
UpperCAmelCase__ : Dict = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
UpperCAmelCase__ : Optional[Any] = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
UpperCAmelCase__ : str = 1
# check for directions
if (
run_maze(lowerCAmelCase , i + 1 , lowerCAmelCase , lowerCAmelCase )
or run_maze(lowerCAmelCase , lowerCAmelCase , j + 1 , lowerCAmelCase )
or run_maze(lowerCAmelCase , i - 1 , lowerCAmelCase , lowerCAmelCase )
or run_maze(lowerCAmelCase , lowerCAmelCase , j - 1 , lowerCAmelCase )
):
return True
UpperCAmelCase__ : Dict = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
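# Illustrative example (assumed maze encoding: 0 = open cell, 1 = blocked cell):
#   a__([[0, 1, 0], [0, 0, 0], [1, 0, 0]])
# prints the visited path matrix
#   [1, 0, 0]
#   [1, 1, 0]
#   [0, 1, 1]
# and returns True; an unsolvable maze prints "No solution exists!" and returns False.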
| 660 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ''
    _A = 'hf-legacy' # "hf://" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> str:
super().__init__(self , **__UpperCamelCase )
UpperCAmelCase__ : int = repo_info
UpperCAmelCase__ : Optional[int] = token
UpperCAmelCase__ : Optional[Any] = None
def lowerCAmelCase__ ( self )-> Optional[Any]:
if self.dir_cache is None:
UpperCAmelCase__ : str = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCAmelCase__ : str = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"name": str(__UpperCamelCase ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , )-> List[Any]:
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
UpperCAmelCase__ : Union[str, Any] = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def lowerCAmelCase__ ( self , __UpperCamelCase , **__UpperCamelCase )-> List[str]:
self._get_dirs()
UpperCAmelCase__ : Union[str, Any] = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase )-> str:
self._get_dirs()
UpperCAmelCase__ : str = PurePosixPath(path.strip("/" ) )
UpperCAmelCase__ : Optional[Any] = {}
for p, f in self.dir_cache.items():
UpperCAmelCase__ : Optional[int] = PurePosixPath(p.strip("/" ) )
UpperCAmelCase__ : Dict = p.parent
if root == path:
UpperCAmelCase__ : Tuple = f
UpperCAmelCase__ : List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
| 660 | 1 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
A__ : Optional[Any] = logging.get_logger(__name__)
A__ : Dict = """https://openaipublic.azureedge.net/jukebox/models/"""
A__ : Any = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def a__ ( lowerCAmelCase : Any ):
'''simple docstring'''
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
UpperCAmelCase__ : Union[str, Any] = key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
UpperCAmelCase__ : Optional[int] = key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
UpperCAmelCase__ : Dict = key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
UpperCAmelCase__ : List[Any] = key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
UpperCAmelCase__ : Union[str, Any] = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
UpperCAmelCase__ : Tuple = key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
UpperCAmelCase__ : Optional[Any] = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
UpperCAmelCase__ : Dict = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
def a__ ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : List[Any] , lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = {}
import re
UpperCAmelCase__ : Tuple = re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
UpperCAmelCase__ : str = re.compile(
R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
UpperCAmelCase__ : Tuple = re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
UpperCAmelCase__ : Optional[Any] = re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
UpperCAmelCase__ : List[str] = re.compile(
R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
UpperCAmelCase__ : Optional[Any] = re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
UpperCAmelCase__ : Optional[int] = re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
UpperCAmelCase__ : Dict = re.compile(
R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
UpperCAmelCase__ : Optional[Any] = re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(lowerCAmelCase ):
UpperCAmelCase__ : Dict = re_encoder_block_conv_in.match(lowerCAmelCase )
UpperCAmelCase__ : str = regex_match.groups()
UpperCAmelCase__ : Optional[Any] = int(groups[2] ) * 2 + int(groups[3] )
UpperCAmelCase__ : List[Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
UpperCAmelCase__ : Union[str, Any] = re_encoder_block_conv_in.sub(lowerCAmelCase , lowerCAmelCase )
elif re_encoder_block_resnet.fullmatch(lowerCAmelCase ):
UpperCAmelCase__ : int = re_encoder_block_resnet.match(lowerCAmelCase )
UpperCAmelCase__ : int = regex_match.groups()
UpperCAmelCase__ : int = int(groups[2] ) * 2 + int(groups[3] )
UpperCAmelCase__ : Union[str, Any] = {"1": 1, "3": 2}[groups[-2]]
UpperCAmelCase__ : Any = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
UpperCAmelCase__ : List[str] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
UpperCAmelCase__ : str = prefix + resnet_block
UpperCAmelCase__ : Tuple = re_encoder_block_resnet.sub(lowerCAmelCase , lowerCAmelCase )
elif re_encoder_block_proj_out.fullmatch(lowerCAmelCase ):
UpperCAmelCase__ : Dict = re_encoder_block_proj_out.match(lowerCAmelCase )
UpperCAmelCase__ : Dict = regex_match.groups()
UpperCAmelCase__ : Optional[int] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
UpperCAmelCase__ : Union[str, Any] = re_encoder_block_proj_out.sub(lowerCAmelCase , lowerCAmelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(lowerCAmelCase ):
UpperCAmelCase__ : List[Any] = re_decoder_block_conv_out.match(lowerCAmelCase )
UpperCAmelCase__ : Dict = regex_match.groups()
UpperCAmelCase__ : Union[str, Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
UpperCAmelCase__ : Any = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
UpperCAmelCase__ : Tuple = re_decoder_block_conv_out.sub(lowerCAmelCase , lowerCAmelCase )
elif re_decoder_block_resnet.fullmatch(lowerCAmelCase ):
UpperCAmelCase__ : Optional[int] = re_decoder_block_resnet.match(lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = regex_match.groups()
UpperCAmelCase__ : List[str] = int(groups[2] ) * 2 + int(groups[3] ) - 2
UpperCAmelCase__ : Tuple = {"1": 1, "3": 2}[groups[-2]]
UpperCAmelCase__ : Any = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
UpperCAmelCase__ : int = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
UpperCAmelCase__ : Union[str, Any] = prefix + resnet_block
UpperCAmelCase__ : List[Any] = re_decoder_block_resnet.sub(lowerCAmelCase , lowerCAmelCase )
elif re_decoder_block_proj_in.fullmatch(lowerCAmelCase ):
UpperCAmelCase__ : Optional[int] = re_decoder_block_proj_in.match(lowerCAmelCase )
UpperCAmelCase__ : List[str] = regex_match.groups()
UpperCAmelCase__ : Dict = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
UpperCAmelCase__ : Optional[int] = re_decoder_block_proj_in.sub(lowerCAmelCase , lowerCAmelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(lowerCAmelCase ):
UpperCAmelCase__ : List[str] = re_prior_cond_conv_out.match(lowerCAmelCase )
UpperCAmelCase__ : Tuple = regex_match.groups()
UpperCAmelCase__ : Tuple = int(groups[1] ) * 2 + int(groups[2] ) - 2
UpperCAmelCase__ : List[str] = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
UpperCAmelCase__ : int = re_prior_cond_conv_out.sub(lowerCAmelCase , lowerCAmelCase )
elif re_prior_cond_resnet.fullmatch(lowerCAmelCase ):
UpperCAmelCase__ : Tuple = re_prior_cond_resnet.match(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = regex_match.groups()
UpperCAmelCase__ : int = int(groups[1] ) * 2 + int(groups[2] ) - 2
UpperCAmelCase__ : Any = {"1": 1, "3": 2}[groups[-2]]
UpperCAmelCase__ : int = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
UpperCAmelCase__ : Dict = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
UpperCAmelCase__ : int = prefix + resnet_block
UpperCAmelCase__ : int = re_prior_cond_resnet.sub(lowerCAmelCase , lowerCAmelCase )
elif re_prior_cond_proj_in.fullmatch(lowerCAmelCase ):
UpperCAmelCase__ : Tuple = re_prior_cond_proj_in.match(lowerCAmelCase )
UpperCAmelCase__ : Tuple = regex_match.groups()
UpperCAmelCase__ : List[Any] = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
UpperCAmelCase__ : Union[str, Any] = re_prior_cond_proj_in.sub(lowerCAmelCase , lowerCAmelCase )
# keep original key
else:
UpperCAmelCase__ : Tuple = original_key
UpperCAmelCase__ : Optional[Any] = replace_key(lowerCAmelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
# handle missmatched shape
elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
UpperCAmelCase__ : Any = model_state_dict[F"{key_prefix}.{key}"]
print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
UpperCAmelCase__ : str = original_key
UpperCAmelCase__ : Optional[int] = original_key
UpperCAmelCase__ : List[str] = value
return new_dict
@torch.no_grad()
def a__ ( lowerCAmelCase : List[str]=None , lowerCAmelCase : Optional[int]=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
UpperCAmelCase__ : Optional[int] = requests.get(F"{PREFIX}{file}" , allow_redirects=lowerCAmelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=lowerCAmelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , "wb" ).write(r.content )
UpperCAmelCase__ : Any = MODEL_MAPPING[model_name.split("/" )[-1]]
UpperCAmelCase__ : Dict = JukeboxConfig.from_pretrained(lowerCAmelCase )
UpperCAmelCase__ : int = JukeboxModel(lowerCAmelCase )
UpperCAmelCase__ : str = []
UpperCAmelCase__ : str = {}
for i, dict_name in enumerate(lowerCAmelCase ):
UpperCAmelCase__ : Optional[int] = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )["model"]
UpperCAmelCase__ : Union[str, Any] = {}
for k in old_dic.keys():
if k.endswith(".b" ):
UpperCAmelCase__ : Union[str, Any] = old_dic[k]
elif k.endswith(".w" ):
UpperCAmelCase__ : List[str] = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
UpperCAmelCase__ : List[str] = old_dic[k]
else:
UpperCAmelCase__ : Optional[int] = old_dic[k]
UpperCAmelCase__ : str = "vqvae" if i == 0 else F"priors.{3 - i}"
UpperCAmelCase__ : Tuple = fix_jukebox_keys(lowerCAmelCase , model.state_dict() , lowerCAmelCase , lowerCAmelCase )
weight_dict.append(lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(lowerCAmelCase )
for i in range(len(lowerCAmelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , "w" ) as txtfile:
json.dump(lowerCAmelCase , lowerCAmelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase )
return weight_dict
if __name__ == "__main__":
A__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
A__ : str = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
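# Example invocation (illustrative; the script file name is an assumption):
#   python convert_jukebox_checkpoint.py \
#       --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path jukebox-1b-lyrics-converted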
| 660 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
A__ : Dict = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
if isinstance(lowerCAmelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowerCAmelCase , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowerCAmelCase ):
return [[videos]]
raise ValueError(F"Could not make batched video from {videos}" )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ['pixel_values']
def __init__( self , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = 1 / 2_55 , __UpperCamelCase = True , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> None:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
UpperCAmelCase__ : List[Any] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase__ : int = get_size_dict(__UpperCamelCase , param_name="crop_size" )
UpperCAmelCase__ : Dict = do_resize
UpperCAmelCase__ : Optional[int] = size
UpperCAmelCase__ : List[Any] = do_center_crop
UpperCAmelCase__ : str = crop_size
UpperCAmelCase__ : Optional[int] = resample
UpperCAmelCase__ : int = do_rescale
UpperCAmelCase__ : Union[str, Any] = rescale_factor
UpperCAmelCase__ : Union[str, Any] = offset
UpperCAmelCase__ : Dict = do_normalize
UpperCAmelCase__ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase__ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" in size:
UpperCAmelCase__ : Union[str, Any] = get_resize_output_image_size(__UpperCamelCase , size["shortest_edge"] , default_to_square=__UpperCamelCase )
elif "height" in size and "width" in size:
UpperCAmelCase__ : Any = (size["height"], size["width"])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[Any] = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(__UpperCamelCase , size=(size["height"], size["width"]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , )-> Tuple:
UpperCAmelCase__ : str = image.astype(np.floataa )
if offset:
UpperCAmelCase__ : Tuple = image - (scale / 2)
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , )-> np.ndarray:
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase__ : Optional[Any] = to_numpy_array(__UpperCamelCase )
if do_resize:
UpperCAmelCase__ : Union[str, Any] = self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase )
if do_center_crop:
UpperCAmelCase__ : int = self.center_crop(__UpperCamelCase , size=__UpperCamelCase )
if do_rescale:
UpperCAmelCase__ : List[str] = self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase , offset=__UpperCamelCase )
if do_normalize:
UpperCAmelCase__ : List[Any] = self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase )
UpperCAmelCase__ : Dict = to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase )
return image
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , )-> PIL.Image.Image:
UpperCAmelCase__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : int = resample if resample is not None else self.resample
UpperCAmelCase__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : int = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : Optional[int] = offset if offset is not None else self.offset
UpperCAmelCase__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Dict = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : Optional[int] = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : List[str] = size if size is not None else self.size
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : Dict = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : Tuple = get_size_dict(__UpperCamelCase , param_name="crop_size" )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase__ : List[str] = make_batched(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = [
[
self._preprocess_image(
image=__UpperCamelCase , do_resize=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , do_center_crop=__UpperCamelCase , crop_size=__UpperCamelCase , do_rescale=__UpperCamelCase , rescale_factor=__UpperCamelCase , offset=__UpperCamelCase , do_normalize=__UpperCamelCase , image_mean=__UpperCamelCase , image_std=__UpperCamelCase , data_format=__UpperCamelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase__ : Dict = {"pixel_values": videos}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
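# Rough usage sketch (illustrative; class and argument names follow the upstream
# VivitImageProcessor API rather than the obfuscated names above):
#   processor = VivitImageProcessor(size={"shortest_edge": 256}, crop_size={"height": 224, "width": 224})
#   batch = processor(videos, return_tensors="np")   # videos: a list of lists of frames
#   batch["pixel_values"].shape                      # (num_videos, num_frames, num_channels, 224, 224)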
| 660 | 1 |
"""simple docstring"""
from math import isqrt
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : List[Any] = False
return [i for i in range(2 , lowerCAmelCase ) if is_prime[i]]
def a__ ( lowerCAmelCase : int = 10**8 ):
'''simple docstring'''
UpperCAmelCase__ : Dict = calculate_prime_numbers(max_number // 2 )
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Tuple = len(lowerCAmelCase ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
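# Counting idea (summary of the loop above): for the prime at index `left`, every prime at an
# index in [left, right] still satisfies prime_numbers[left] * prime_numbers[right] < max_number
# after the inner while loop, so right - left + 1 semiprimes are added per left index.
# With the default max_number = 10**8 this matches Project Euler problem 187 (semiprimes below 10**8).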
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 |
"""simple docstring"""
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
    if lowerCAmelCase < 0:
        raise ValueError("Input value must be a positive integer" )
    elif not isinstance(lowerCAmelCase , int ):
        raise TypeError("Input value must be a 'int' type" )
return bin(lowerCAmelCase ).count("1" )
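# Example (illustrative): a__(25) returns 3, since bin(25) == "0b11001" contains three '1' bits.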
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 1 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=5_12 , __UpperCamelCase=16 , __UpperCamelCase=2 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=4 , __UpperCamelCase=None , )-> Dict:
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Union[str, Any] = batch_size
UpperCAmelCase__ : str = seq_length
UpperCAmelCase__ : int = is_training
UpperCAmelCase__ : Union[str, Any] = use_input_mask
UpperCAmelCase__ : int = use_token_type_ids
UpperCAmelCase__ : List[Any] = use_labels
UpperCAmelCase__ : Optional[Any] = vocab_size
UpperCAmelCase__ : Dict = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : Union[str, Any] = num_attention_heads
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : str = hidden_act
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : List[Any] = max_position_embeddings
UpperCAmelCase__ : Optional[Any] = type_vocab_size
UpperCAmelCase__ : str = type_sequence_label_size
UpperCAmelCase__ : Optional[Any] = initializer_range
UpperCAmelCase__ : List[str] = num_labels
UpperCAmelCase__ : Union[str, Any] = num_choices
UpperCAmelCase__ : Union[str, Any] = scope
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : List[str] = None
if self.use_input_mask:
UpperCAmelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : Any = None
if self.use_token_type_ids:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : Optional[int] = None
if self.use_labels:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : str = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self )-> List[Any]:
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> List[str]:
UpperCAmelCase__ : Union[str, Any] = LlamaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : List[Any] = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> Tuple:
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : Dict = LlamaModel(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : int = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , )
UpperCAmelCase__ : Any = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , )
UpperCAmelCase__ : Any = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> Optional[Any]:
UpperCAmelCase__ : List[str] = LlamaForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : Dict = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> int:
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : Optional[Any] = True
UpperCAmelCase__ : List[Any] = LlamaForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# first forward pass
UpperCAmelCase__ : List[Any] = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , use_cache=__UpperCamelCase , )
UpperCAmelCase__ : Dict = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase__ : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase__ : Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCAmelCase__ : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase__ : str = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase__ : Tuple = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )["hidden_states"][0]
UpperCAmelCase__ : int = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )["hidden_states"][0]
# select random slice
UpperCAmelCase__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase__ : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase__ : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 ) )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Dict = self.prepare_config_and_inputs()
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = config_and_inputs
UpperCAmelCase__ : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_A = (LlamaForCausalLM,) if is_torch_available() else ()
_A = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_A = False
_A = False
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Optional[int] = LlamaModelTester(self )
UpperCAmelCase__ : Any = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 )
def lowerCAmelCase__ ( self )-> int:
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase__ : int = type
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[Any] = 3
UpperCAmelCase__ : int = input_dict["input_ids"]
UpperCAmelCase__ : Any = input_ids.ne(1 ).to(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase__ : List[Any] = LlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : Any = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : List[Any] = 3
UpperCAmelCase__ : List[Any] = "single_label_classification"
UpperCAmelCase__ : List[Any] = input_dict["input_ids"]
UpperCAmelCase__ : str = input_ids.ne(1 ).to(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase__ : Optional[int] = LlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : Tuple = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Any = 3
UpperCAmelCase__ : Union[str, Any] = "multi_label_classification"
UpperCAmelCase__ : Optional[Any] = input_dict["input_ids"]
UpperCAmelCase__ : str = input_ids.ne(1 ).to(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase__ : int = LlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
def lowerCAmelCase__ ( self )-> int:
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[int] = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase__ : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase__ : Optional[Any] = LlamaModel(__UpperCamelCase )
original_model.to(__UpperCamelCase )
original_model.eval()
UpperCAmelCase__ : Optional[Any] = original_model(__UpperCamelCase ).last_hidden_state
UpperCAmelCase__ : Dict = original_model(__UpperCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase__ : Union[str, Any] = {"type": scaling_type, "factor": 10.0}
UpperCAmelCase__ : Union[str, Any] = LlamaModel(__UpperCamelCase )
scaled_model.to(__UpperCamelCase )
scaled_model.eval()
UpperCAmelCase__ : Tuple = scaled_model(__UpperCamelCase ).last_hidden_state
UpperCAmelCase__ : Any = scaled_model(__UpperCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-5 ) )
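# Note (descriptive): the parameterized rope-scaling test above builds two identically seeded models
# and checks that "dynamic" scaling leaves short-input hidden states unchanged while "linear" scaling
# (factor 10.0) changes them, and that both scaling types change the outputs for long inputs.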
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Dict = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
UpperCAmelCase__ : List[str] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" )
UpperCAmelCase__ : List[Any] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
UpperCAmelCase__ : int = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
UpperCAmelCase__ : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ : str = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
UpperCAmelCase__ : Optional[Any] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" )
UpperCAmelCase__ : List[str] = model(torch.tensor(__UpperCamelCase ) )
# Expected mean on dim = -1
UpperCAmelCase__ : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
UpperCAmelCase__ : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : int = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
UpperCAmelCase__ : int = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" )
UpperCAmelCase__ : Union[str, Any] = model(torch.tensor(__UpperCamelCase ) )
# Expected mean on dim = -1
UpperCAmelCase__ : Optional[Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
UpperCAmelCase__ : Tuple = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
"Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test" )
@slow
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Optional[int] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
UpperCAmelCase__ : Union[str, Any] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" )
UpperCAmelCase__ : Optional[int] = model(torch.tensor(__UpperCamelCase ) )
UpperCAmelCase__ : List[str] = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
# fmt: off
UpperCAmelCase__ : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Model is curently gated" )
@slow
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Dict = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
UpperCAmelCase__ : int = "Simply put, the theory of relativity states that "
UpperCAmelCase__ : List[str] = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" )
UpperCAmelCase__ : Any = tokenizer.encode(__UpperCamelCase , return_tensors="pt" )
UpperCAmelCase__ : Tuple = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=__UpperCamelCase )
# greedy generation outputs
UpperCAmelCase__ : List[str] = model.generate(__UpperCamelCase , max_new_tokens=64 , top_p=__UpperCamelCase , temperature=1 , do_sample=__UpperCamelCase )
UpperCAmelCase__ : List[str] = tokenizer.decode(generated_ids[0] , skip_special_tokens=__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
| 660 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
A__ : Optional[Any] = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : bool , lowerCAmelCase : bool ):
'''simple docstring'''
def run_func(lowerCAmelCase : Dict ):
@wraps(lowerCAmelCase )
def run_in_eager_mode(*lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Dict ):
return func(*lowerCAmelCase , **lowerCAmelCase )
@wraps(lowerCAmelCase )
@tf.function(experimental_compile=lowerCAmelCase )
def run_in_graph_mode(*lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[Any] ):
return func(*lowerCAmelCase , **lowerCAmelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
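# Note (descriptive): with do_eager_mode=False the wrapped callable is traced through tf.function
# (and XLA-compiled when use_xla=True); with do_eager_mode=True it runs eagerly, and combining
# eager mode with use_xla=True raises a ValueError above.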
def a__ ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = random.Random()
UpperCAmelCase__ : List[str] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(lowerCAmelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = 42
_A = "TensorFlow"
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return tf.__version__
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
# initialize GPU on separate process
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Union[str, Any] = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
UpperCAmelCase__ : List[Any] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : List[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : List[str] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Any = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Optional[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : Union[str, Any] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Optional[int] = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : str = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : Any = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : List[Any] = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Dict = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : Any = TF_MODEL_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : int = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Optional[Any] = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , training=__UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__UpperCamelCase , training=__UpperCamelCase )
UpperCAmelCase__ : Dict = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : List[Any] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Any = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : Any = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : int = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : int = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : List[str] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : Union[str, Any] = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Dict = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Union[str, Any] = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Any = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
UpperCAmelCase__ : str = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCAmelCase__ ( self , __UpperCamelCase )-> float:
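        # speed benchmark: time the callable with timeit (with extra warm-up runs for TPU/XLA) and return the best average time per iteration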
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run the model an extra 5 times first to stabilize compilation for TPU/XLA
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(__UpperCamelCase , repeat=1 , number=5 )
                # as noted in https://docs.python.org/3/library/timeit.html#timeit.Timer.repeat, the minimum should be taken rather than the average
UpperCAmelCase__ : Optional[Any] = timeit.repeat(
__UpperCamelCase , repeat=self.args.repeat , number=10 , )
return min(__UpperCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> [Memory, MemorySummary]:
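        # measure peak memory via nvml on GPU or peak RSS on CPU, optionally tracing line by line; returns a Memory value and an optional summary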
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
UpperCAmelCase__ : List[str] = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
UpperCAmelCase__ : Optional[int] = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
UpperCAmelCase__ : Any = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
UpperCAmelCase__ : Optional[int] = nvml.nvmlDeviceGetMemoryInfo(__UpperCamelCase )
UpperCAmelCase__ : str = meminfo.used
UpperCAmelCase__ : int = Memory(__UpperCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
UpperCAmelCase__ : Any = None
else:
UpperCAmelCase__ : List[Any] = measure_peak_memory_cpu(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = Memory(__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCAmelCase__ : Optional[Any] = stop_memory_tracing(__UpperCamelCase )
if memory is None:
UpperCAmelCase__ : Tuple = summary.total
else:
UpperCAmelCase__ : int = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
return "N/A", None
| 660 | 1 |
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
A__ : Optional[int] = _symbol_database.Default()
A__ : int = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
A__ : str = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
A__ : Dict = None
A__ : List[str] = b"""H\003"""
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
A__ : Tuple = 45
A__ : List[Any] = 1_581
A__ : int = 1_517
A__ : str = 1_570
A__ : Union[str, Any] = 1_584
A__ : List[str] = 1_793
A__ : List[str] = 1_795
A__ : List[Any] = 1_916
A__ : Optional[Any] = 1_864
A__ : str = 1_905
A__ : Optional[Any] = 1_919
A__ : Optional[int] = 2_429
A__ : int = 2_208
A__ : int = 2_418
A__ : Union[str, Any] = 2_323
A__ : Dict = 2_407
# @@protoc_insertion_point(module_scope)
| 660 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
A__ : List[str] = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
A__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 660 | 1 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
A__ : Any = False
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : List[Any] = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = "A painting of a squirrel eating a burger "
UpperCAmelCase__ : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase__ : int = pipe(
prompt=__UpperCamelCase , generator=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ : int = VersatileDiffusionTextToImagePipeline.from_pretrained(__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : str = generator.manual_seed(0 )
UpperCAmelCase__ : Tuple = pipe(
prompt=__UpperCamelCase , generator=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : List[Any] = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = "A painting of a squirrel eating a burger "
UpperCAmelCase__ : Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase__ : Optional[int] = pipe(
prompt=__UpperCamelCase , generator=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
UpperCAmelCase__ : Dict = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : int = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 660 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self , __UpperCamelCase = 7_68 , )-> Union[str, Any]:
super().__init__()
UpperCAmelCase__ : str = nn.Parameter(torch.zeros(1 , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[int] = nn.Parameter(torch.ones(1 , __UpperCamelCase ) )
def lowerCAmelCase__ ( self , __UpperCamelCase = None , __UpperCamelCase = None , )-> Any:
UpperCAmelCase__ : Dict = nn.Parameter(self.mean.to(__UpperCamelCase ).to(__UpperCamelCase ) )
UpperCAmelCase__ : Any = nn.Parameter(self.std.to(__UpperCamelCase ).to(__UpperCamelCase ) )
return self
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Dict = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[Any]:
UpperCAmelCase__ : Any = (embeds * self.std) + self.mean
return embeds
| 660 | 1 |
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
A__ : int = logging.getLogger(__name__)
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'token-classification'
def __init__( self , __UpperCamelCase )-> Union[str, Any]:
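        # resolve the TokenClassificationTask subclass named by hparams.task_type and load the label list it defines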
if type(__UpperCamelCase ) == dict:
UpperCAmelCase__ : int = Namespace(**__UpperCamelCase )
UpperCAmelCase__ : Dict = import_module("tasks" )
try:
UpperCAmelCase__ : Optional[int] = getattr(__UpperCamelCase , hparams.task_type )
UpperCAmelCase__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
F"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" )
UpperCAmelCase__ : List[Any] = self.token_classification_task.get_labels(hparams.labels )
UpperCAmelCase__ : Optional[int] = CrossEntropyLoss().ignore_index
super().__init__(__UpperCamelCase , len(self.labels ) , self.mode )
def lowerCAmelCase__ ( self , **__UpperCamelCase )-> Dict:
return self.model(**__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> List[Any]:
UpperCAmelCase__ : int = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase__ : Any = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            ) # XLM and RoBERTa don't use token_type_ids
UpperCAmelCase__ : Union[str, Any] = self(**__UpperCamelCase )
UpperCAmelCase__ : List[str] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : str = self.hparams
for mode in ["train", "dev", "test"]:
UpperCAmelCase__ : Any = self._feature_file(__UpperCamelCase )
if os.path.exists(__UpperCamelCase ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , __UpperCamelCase )
UpperCAmelCase__ : str = torch.load(__UpperCamelCase )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
UpperCAmelCase__ : List[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = self.token_classification_task.convert_examples_to_features(
__UpperCamelCase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__UpperCamelCase , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("Saving features into cached file %s" , __UpperCamelCase )
torch.save(__UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False )-> DataLoader:
UpperCAmelCase__ : Optional[Any] = self._feature_file(__UpperCamelCase )
logger.info("Loading features from cached file %s" , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = torch.load(__UpperCamelCase )
UpperCAmelCase__ : Tuple = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCAmelCase__ : Optional[int] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
UpperCAmelCase__ : str = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
UpperCAmelCase__ : Optional[Any] = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
UpperCAmelCase__ : Optional[int] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , batch_size=__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> str:
"""Compute validation""" ""
UpperCAmelCase__ : Tuple = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
UpperCAmelCase__ : List[Any] = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            ) # XLM and RoBERTa don't use token_type_ids
UpperCAmelCase__ : Dict = self(**__UpperCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ : str = outputs[:2]
UpperCAmelCase__ : str = logits.detach().cpu().numpy()
UpperCAmelCase__ : Optional[int] = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
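        # aggregate the per-batch outputs, drop padded label positions, and compute seqeval accuracy/precision/recall/F1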
UpperCAmelCase__ : List[Any] = torch.stack([x["val_loss"] for x in outputs] ).mean()
UpperCAmelCase__ : Optional[Any] = np.concatenate([x["pred"] for x in outputs] , axis=0 )
UpperCAmelCase__ : int = np.argmax(__UpperCamelCase , axis=2 )
UpperCAmelCase__ : Any = np.concatenate([x["target"] for x in outputs] , axis=0 )
UpperCAmelCase__ : Dict = dict(enumerate(self.labels ) )
UpperCAmelCase__ : List[Any] = [[] for _ in range(out_label_ids.shape[0] )]
UpperCAmelCase__ : List[Any] = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
UpperCAmelCase__ : List[Any] = {
"val_loss": val_loss_mean,
"accuracy_score": accuracy_score(__UpperCamelCase , __UpperCamelCase ),
"precision": precision_score(__UpperCamelCase , __UpperCamelCase ),
"recall": recall_score(__UpperCamelCase , __UpperCamelCase ),
"f1": fa_score(__UpperCamelCase , __UpperCamelCase ),
}
UpperCAmelCase__ : Union[str, Any] = dict(results.items() )
UpperCAmelCase__ : Dict = results
return ret, preds_list, out_label_list
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Any:
# when stable
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self._eval_end(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
# updating to test_epoch_end instead of deprecated test_end
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self._eval_end(__UpperCamelCase )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
UpperCAmelCase__ : List[str] = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def lowerCAmelCase__ ( __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
# Add NER specific options
BaseTransformer.add_model_specific_args(__UpperCamelCase , __UpperCamelCase )
parser.add_argument(
"--task_type" , default="NER" , type=__UpperCamelCase , help="Task type to fine tune in training (e.g. NER, POS, etc)" )
parser.add_argument(
"--max_seq_length" , default=1_28 , type=__UpperCamelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--labels" , default="" , type=__UpperCamelCase , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , )
parser.add_argument(
"--gpus" , default=0 , type=__UpperCamelCase , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
if __name__ == "__main__":
A__ : Optional[Any] = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
A__ : str = NERTransformer.add_model_specific_args(parser, os.getcwd())
A__ : Dict = parser.parse_args()
A__ : Union[str, Any] = NERTransformer(args)
A__ : str = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
A__ : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
A__ : str = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 660 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : List[Any] ):
'''simple docstring'''
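    # build a GPT-2 config (default or from a JSON file), load the TF checkpoint weights into a PyTorch model, then save the weights and config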
# Construct model
if gpta_config_file == "":
UpperCAmelCase__ : Optional[int] = GPTaConfig()
else:
UpperCAmelCase__ : Dict = GPTaConfig.from_json_file(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = GPTaModel(lowerCAmelCase )
# Load weights from numpy
load_tf_weights_in_gpta(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save pytorch-model
UpperCAmelCase__ : Optional[int] = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
UpperCAmelCase__ : Any = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
torch.save(model.state_dict() , lowerCAmelCase )
print(F"Save configuration file to {pytorch_config_dump_path}" )
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
A__ : Optional[Any] = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 660 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A__ : Union[str, Any] = {
"""configuration_wav2vec2""": ["""WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Wav2Vec2Config"""],
"""feature_extraction_wav2vec2""": ["""Wav2Vec2FeatureExtractor"""],
"""processing_wav2vec2""": ["""Wav2Vec2Processor"""],
"""tokenization_wav2vec2""": ["""Wav2Vec2CTCTokenizer""", """Wav2Vec2Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Dict = [
"""WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Wav2Vec2ForAudioFrameClassification""",
"""Wav2Vec2ForCTC""",
"""Wav2Vec2ForMaskedLM""",
"""Wav2Vec2ForPreTraining""",
"""Wav2Vec2ForSequenceClassification""",
"""Wav2Vec2ForXVector""",
"""Wav2Vec2Model""",
"""Wav2Vec2PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[Any] = [
"""TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWav2Vec2ForCTC""",
"""TFWav2Vec2Model""",
"""TFWav2Vec2PreTrainedModel""",
"""TFWav2Vec2ForSequenceClassification""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[int] = [
"""FlaxWav2Vec2ForCTC""",
"""FlaxWav2Vec2ForPreTraining""",
"""FlaxWav2Vec2Model""",
"""FlaxWav2Vec2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
A__ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 660 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
A__ : Optional[int] = ["""small""", """medium""", """large"""]
A__ : Optional[int] = """lm_head.decoder.weight"""
A__ : Dict = """lm_head.weight"""
def a__ ( lowerCAmelCase : str , lowerCAmelCase : str ):
'''simple docstring'''
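    # load the fine-tuned state dict, move the LM head weight from the old key to the new one (see the constants above), and save the result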
UpperCAmelCase__ : Dict = torch.load(lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = d.pop(lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
torch.save(lowerCAmelCase , os.path.join(lowerCAmelCase , lowerCAmelCase ) )
if __name__ == "__main__":
A__ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
A__ : Tuple = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
A__ : Tuple = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
A__ : str = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 660 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A__ : Tuple = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int = ["""YolosFeatureExtractor"""]
A__ : Optional[Any] = ["""YolosImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[Any] = [
"""YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""YolosForObjectDetection""",
"""YolosModel""",
"""YolosPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
A__ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 660 |
"""simple docstring"""
from math import isqrt
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
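    # Sieve of Eratosthenes over [0, max_number): mark every multiple of each prime as composite and collect the survivors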
UpperCAmelCase__ : Tuple = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
            for j in range(i**2 , lowerCAmelCase , i ):
UpperCAmelCase__ : List[Any] = False
return [i for i in range(2 , lowerCAmelCase ) if is_prime[i]]
def a__ ( lowerCAmelCase : int = 10**8 ):
'''simple docstring'''
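    # count numbers below max_number that are a product of exactly two primes, using a two-pointer sweep over the primes below max_number // 2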
UpperCAmelCase__ : Dict = calculate_prime_numbers(max_number // 2 )
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Tuple = len(lowerCAmelCase ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 | 1 |
"""simple docstring"""
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"compression_format, is_archive" , [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] , )
def a__ ( lowerCAmelCase : Dict , lowerCAmelCase : int , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : int , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = {
"7z": (seven_zip_file, SevenZipExtractor),
"bz2": (bza_file, BzipaExtractor),
"gzip": (gz_file, GzipExtractor),
"lz4": (lza_file, LzaExtractor),
"tar": (tar_file, TarExtractor),
"xz": (xz_file, XzExtractor),
"zip": (zip_file, ZipExtractor),
"zstd": (zstd_file, ZstdExtractor),
}
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = input_paths_and_base_extractors[compression_format]
if input_path is None:
UpperCAmelCase__ : int = F"for '{compression_format}' compression_format, "
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCAmelCase )
assert base_extractor.is_extractable(lowerCAmelCase )
UpperCAmelCase__ : int = tmp_path / ("extracted" if is_archive else "extracted.txt")
base_extractor.extract(lowerCAmelCase , lowerCAmelCase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
UpperCAmelCase__ : Dict = file_path.read_text(encoding="utf-8" )
else:
UpperCAmelCase__ : Any = output_path.read_text(encoding="utf-8" )
UpperCAmelCase__ : str = text_file.read_text(encoding="utf-8" )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"compression_format, is_archive" , [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] , )
def a__ ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : str , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = {
"7z": seven_zip_file,
"bz2": bza_file,
"gzip": gz_file,
"lz4": lza_file,
"tar": tar_file,
"xz": xz_file,
"zip": zip_file,
"zstd": zstd_file,
}
UpperCAmelCase__ : Any = input_paths[compression_format]
if input_path is None:
UpperCAmelCase__ : List[Any] = F"for '{compression_format}' compression_format, "
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCAmelCase )
UpperCAmelCase__ : List[Any] = Extractor.infer_extractor_format(lowerCAmelCase )
assert extractor_format is not None
UpperCAmelCase__ : Any = tmp_path / ("extracted" if is_archive else "extracted.txt")
Extractor.extract(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
UpperCAmelCase__ : Optional[Any] = file_path.read_text(encoding="utf-8" )
else:
UpperCAmelCase__ : Tuple = output_path.read_text(encoding="utf-8" )
UpperCAmelCase__ : Union[str, Any] = text_file.read_text(encoding="utf-8" )
assert extracted_file_content == expected_file_content
@pytest.fixture
def a__ ( lowerCAmelCase : str , lowerCAmelCase : Tuple ):
'''simple docstring'''
import tarfile
UpperCAmelCase__ : str = tmp_path / "data_dot_dot"
directory.mkdir()
UpperCAmelCase__ : int = directory / "tar_file_with_dot_dot.tar"
with tarfile.TarFile(lowerCAmelCase , "w" ) as f:
f.add(lowerCAmelCase , arcname=os.path.join(".." , text_file.name ) )
return path
@pytest.fixture
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
import tarfile
UpperCAmelCase__ : int = tmp_path / "data_sym_link"
directory.mkdir()
UpperCAmelCase__ : Dict = directory / "tar_file_with_sym_link.tar"
os.symlink(".." , directory / "subdir" , target_is_directory=lowerCAmelCase )
with tarfile.TarFile(lowerCAmelCase , "w" ) as f:
f.add(str(directory / "subdir" ) , arcname="subdir" ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
"insecure_tar_file, error_log" , [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")] , )
def a__ ( lowerCAmelCase : Dict , lowerCAmelCase : int , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = {
"tar_file_with_dot_dot": tar_file_with_dot_dot,
"tar_file_with_sym_link": tar_file_with_sym_link,
}
UpperCAmelCase__ : Optional[int] = insecure_tar_files[insecure_tar_file]
UpperCAmelCase__ : Tuple = tmp_path / "extracted"
TarExtractor.extract(lowerCAmelCase , lowerCAmelCase )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def a__ ( lowerCAmelCase : Optional[int] ):
'''simple docstring'''
# We should have less false positives than zipfile.is_zipfile
# We do that by checking only the magic number
UpperCAmelCase__ : Optional[int] = tmpdir / "not_a_zip_file"
# From: https://github.com/python/cpython/pull/5053
UpperCAmelCase__ : Optional[Any] = (
b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
)
with not_a_zip_file.open("wb" ) as f:
f.write(lowerCAmelCase )
assert zipfile.is_zipfile(str(lowerCAmelCase ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(lowerCAmelCase ) # but we're right
| 660 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def a__ ( lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] ):
'''simple docstring'''
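    # pad (or truncate) each sequence to sequence_length with padding_value, on the requested padding side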
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[int] = np.full((len(lowerCAmelCase ), sequence_length, 2) , lowerCAmelCase )
else:
UpperCAmelCase__ : Optional[Any] = np.full((len(lowerCAmelCase ), sequence_length) , lowerCAmelCase )
for i, tensor in enumerate(lowerCAmelCase ):
if padding_side == "right":
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Dict = tensor[:sequence_length]
else:
UpperCAmelCase__ : Tuple = tensor[:sequence_length]
else:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[Any] = tensor[:sequence_length]
else:
UpperCAmelCase__ : int = tensor[:sequence_length]
return out_tensor.tolist()
def a__ ( lowerCAmelCase : Optional[int] ):
'''simple docstring'''
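    # ASCII symbol ranges and any Unicode "P*" category character count as punctuation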
UpperCAmelCase__ : Tuple = ord(lowerCAmelCase )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
UpperCAmelCase__ : Optional[Any] = unicodedata.category(lowerCAmelCase )
if cat.startswith("P" ):
return True
return False
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = True
_A = None
_A = None
_A = -100
_A = "pt"
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
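        # pad the token-level labels, NER tags and original entity spans to the padded length of the entity_ids batch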
import torch
UpperCAmelCase__ : Optional[Any] = "label" if "label" in features[0].keys() else "labels"
UpperCAmelCase__ : Dict = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
UpperCAmelCase__ : str = self.tokenizer.pad(
__UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
if labels is None:
return batch
UpperCAmelCase__ : Optional[Any] = torch.tensor(batch["entity_ids"] ).shape[1]
UpperCAmelCase__ : int = self.tokenizer.padding_side
if padding_side == "right":
UpperCAmelCase__ : int = [
list(__UpperCamelCase ) + [self.label_pad_token_id] * (sequence_length - len(__UpperCamelCase )) for label in labels
]
else:
UpperCAmelCase__ : List[Any] = [
[self.label_pad_token_id] * (sequence_length - len(__UpperCamelCase )) + list(__UpperCamelCase ) for label in labels
]
UpperCAmelCase__ : Optional[Any] = [feature["ner_tags"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , -1 , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = [feature["original_entity_spans"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , (-1, -1) , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = {k: torch.tensor(__UpperCamelCase , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 660 | 1 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self , __UpperCamelCase = 7_68 , )-> Union[str, Any]:
super().__init__()
UpperCAmelCase__ : str = nn.Parameter(torch.zeros(1 , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[int] = nn.Parameter(torch.ones(1 , __UpperCamelCase ) )
def lowerCAmelCase__ ( self , __UpperCamelCase = None , __UpperCamelCase = None , )-> Any:
UpperCAmelCase__ : Dict = nn.Parameter(self.mean.to(__UpperCamelCase ).to(__UpperCamelCase ) )
UpperCAmelCase__ : Any = nn.Parameter(self.std.to(__UpperCamelCase ).to(__UpperCamelCase ) )
return self
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Dict = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[Any]:
UpperCAmelCase__ : Any = (embeds * self.std) + self.mean
return embeds
| 660 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def a__ ( lowerCAmelCase : List[str] ):
'''simple docstring'''
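    # decorator: run the wrapped call and return the elapsed wall-clock time instead of its result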
def wrapper(*lowerCAmelCase : Any , **lowerCAmelCase : Tuple ):
UpperCAmelCase__ : Optional[int] = timeit.default_timer()
UpperCAmelCase__ : int = func(*lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase__ : List[Any] = timeit.default_timer() - starttime
return delta
UpperCAmelCase__ : int = func.__name__
return wrapper
def a__ ( lowerCAmelCase : dict , lowerCAmelCase : Optional[int]=100 , lowerCAmelCase : List[str]=None ):
'''simple docstring'''
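    # create num_examples synthetic records matching the given features (random arrays and ints, a fixed sentence for string values)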
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Optional[Any] = seq_shapes or {}
for i in range(lowerCAmelCase ):
UpperCAmelCase__ : int = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowerCAmelCase , _ArrayXD ):
UpperCAmelCase__ : List[str] = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowerCAmelCase , datasets.Value ):
if v.dtype == "string":
UpperCAmelCase__ : Dict = "The small grey turtle was surprisingly fast when challenged."
else:
UpperCAmelCase__ : str = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(lowerCAmelCase , datasets.Sequence ):
while isinstance(lowerCAmelCase , datasets.Sequence ):
UpperCAmelCase__ : List[str] = v.feature
UpperCAmelCase__ : Optional[int] = seq_shapes[k]
UpperCAmelCase__ : Optional[int] = np.random.rand(*lowerCAmelCase ).astype(v.dtype )
UpperCAmelCase__ : Union[str, Any] = data
dummy_data.append((i, example) )
return dummy_data
def a__ ( lowerCAmelCase : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : List[str]=100 , lowerCAmelCase : Optional[int]=None ):
'''simple docstring'''
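    # encode the synthetic examples, write them to an Arrow file, and reload them as a datasets.Dataset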
UpperCAmelCase__ : int = generate_examples(lowerCAmelCase , num_examples=lowerCAmelCase , seq_shapes=lowerCAmelCase )
with ArrowWriter(features=lowerCAmelCase , path=lowerCAmelCase ) as writer:
for key, record in dummy_data:
UpperCAmelCase__ : List[Any] = features.encode_example(lowerCAmelCase )
writer.write(lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
F"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
UpperCAmelCase__ : Optional[int] = datasets.Dataset.from_file(filename=lowerCAmelCase , info=datasets.DatasetInfo(features=lowerCAmelCase ) )
return dataset
| 660 | 1 |
"""simple docstring"""
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
A__ : Tuple = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self , *__UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase )-> Dict:
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
UpperCAmelCase__ : Tuple = eval_examples
UpperCAmelCase__ : List[Any] = post_process_function
UpperCAmelCase__ : Any = quant_trainer_args
UpperCAmelCase__ : Any = 1_28 # default number of calibration samples
def lowerCAmelCase__ ( self , __UpperCamelCase=None )-> Union[str, Any]:
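        # build a dataloader over the calibration split, reusing the evaluation batch size and collator settings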
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("Trainer: calibration requires an calib_dataset." )
UpperCAmelCase__ : Union[str, Any] = calib_dataset if calib_dataset is not None else self.calib_dataset
UpperCAmelCase__ : List[Any] = self._remove_unused_columns(__UpperCamelCase , description="Calibration" )
return DataLoader(
__UpperCamelCase , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase=None )-> Union[str, Any]:
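        # stream roughly calib_num samples through the model with calibration hooks enabled, then finalize the quantization ranges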
UpperCAmelCase__ : Optional[Any] = self.train_dataset if calib_dataset is None else calib_dataset
UpperCAmelCase__ : Union[str, Any] = self.get_calib_dataloader(__UpperCamelCase )
UpperCAmelCase__ : List[str] = self.model
quant_trainer.configure_model(__UpperCamelCase , self.quant_trainer_args , calib=__UpperCamelCase )
model.eval()
quant_trainer.enable_calibration(__UpperCamelCase )
logger.info("***** Running calibration *****" )
logger.info(F" Num examples = {self.calib_num}" )
logger.info(F" Batch size = {calib_dataloader.batch_size}" )
for step, inputs in enumerate(__UpperCamelCase ):
# Prediction step
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.prediction_step(__UpperCamelCase , __UpperCamelCase , prediction_loss_only=__UpperCamelCase )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(__UpperCamelCase , self.quant_trainer_args )
UpperCAmelCase__ : Optional[int] = model
def lowerCAmelCase__ ( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase = "eval" )-> Dict:
UpperCAmelCase__ : Dict = self.eval_dataset if eval_dataset is None else eval_dataset
UpperCAmelCase__ : str = self.get_eval_dataloader(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase__ : Tuple = self.compute_metrics
UpperCAmelCase__ : Any = None
UpperCAmelCase__ : Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
UpperCAmelCase__ : int = eval_loop(
__UpperCamelCase , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__UpperCamelCase , )
finally:
UpperCAmelCase__ : Dict = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
UpperCAmelCase__ : Any = self.post_process_function(__UpperCamelCase , __UpperCamelCase , output.predictions )
UpperCAmelCase__ : Dict = self.compute_metrics(__UpperCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
UpperCAmelCase__ : List[Any] = metrics.pop(__UpperCamelCase )
self.log(__UpperCamelCase )
else:
UpperCAmelCase__ : Any = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
UpperCAmelCase__ : Union[str, Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __UpperCamelCase )
return metrics
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase = "test" )-> Union[str, Any]:
UpperCAmelCase__ : Tuple = self.get_test_dataloader(__UpperCamelCase )
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase__ : Optional[Any] = self.compute_metrics
UpperCAmelCase__ : int = None
UpperCAmelCase__ : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
UpperCAmelCase__ : Dict = eval_loop(
__UpperCamelCase , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__UpperCamelCase , )
finally:
UpperCAmelCase__ : Optional[Any] = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
UpperCAmelCase__ : Any = self.post_process_function(__UpperCamelCase , __UpperCamelCase , output.predictions , "predict" )
UpperCAmelCase__ : Union[str, Any] = self.compute_metrics(__UpperCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
UpperCAmelCase__ : Optional[int] = metrics.pop(__UpperCamelCase )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase="./" )-> Any:
UpperCAmelCase__ : Optional[Any] = self.eval_dataset
UpperCAmelCase__ : Optional[Any] = self.get_eval_dataloader(__UpperCamelCase )
UpperCAmelCase__ : Dict = next(iter(__UpperCamelCase ) )
# saving device - to make it consistent
UpperCAmelCase__ : Union[str, Any] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
# convert to tuple
UpperCAmelCase__ : Any = tuple(v.to(__UpperCamelCase ) for k, v in batch.items() )
logger.info("Converting model to be onnx compatible" )
from pytorch_quantization.nn import TensorQuantizer
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : str = self.model.to(__UpperCamelCase )
model.eval()
model.float()
UpperCAmelCase__ : Any = model.module if hasattr(__UpperCamelCase , "module" ) else model
quant_trainer.configure_model(__UpperCamelCase , self.quant_trainer_args )
UpperCAmelCase__ : Dict = os.path.join(__UpperCamelCase , "model.onnx" )
logger.info(F"exporting model to {output_model_file}" )
UpperCAmelCase__ : List[Any] = {0: "batch_size", 1: "seq_len"}
torch.onnx.export(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , export_params=__UpperCamelCase , opset_version=13 , do_constant_folding=__UpperCamelCase , input_names=["input_ids", "attention_mask", "token_type_ids"] , output_names=["output_start_logits", "output_end_logits"] , dynamic_axes={
"input_ids": axes,
"attention_mask": axes,
"token_type_ids": axes,
"output_start_logits": axes,
"output_end_logits": axes,
} , verbose=__UpperCamelCase , )
logger.info("onnx export finished" )
| 660 |
"""simple docstring"""
from manim import *
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : str = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase__ : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Union[str, Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[Any] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : int = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = VGroup(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("CPU" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(4 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Union[str, Any] = Text("GPU" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Model" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
rect.set_stroke(__UpperCamelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCAmelCase__ : int = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__UpperCamelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__UpperCamelCase , buff=0.0 )
self.add(__UpperCamelCase )
cpu_targs.append(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Any = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Loaded Checkpoint" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , aligned_edge=__UpperCamelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase__ : Any = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : str = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__UpperCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCAmelCase__ : Optional[Any] = MarkupText(
F"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCamelCase ) , Write(__UpperCamelCase ) )
self.play(Write(__UpperCamelCase , run_time=1 ) , Create(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = fill.copy().set_fill(__UpperCamelCase , opacity=0.7 )
target.move_to(__UpperCamelCase )
first_animations.append(GrowFromCenter(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : List[str] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__UpperCamelCase , run_time=1.5 ) )
self.play(*__UpperCamelCase )
self.play(*__UpperCamelCase )
self.wait()
| 660 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A__ : Dict = {
"""configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""],
"""tokenization_biogpt""": ["""BioGptTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[int] = [
"""BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BioGptForCausalLM""",
"""BioGptForTokenClassification""",
"""BioGptForSequenceClassification""",
"""BioGptModel""",
"""BioGptPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
A__ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
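# --- Added usage sketch (not part of the original file) ---
# Once this lazy module is resolved, generation with BioGPT typically looks like the
# commented sketch below; the checkpoint id is an assumption and needs torch plus network access.
# from transformers import BioGptTokenizer, BioGptForCausalLM
# tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
# model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
# inputs = tokenizer("COVID-19 is", return_tensors="pt")
# print(tokenizer.decode(model.generate(**inputs, max_new_tokens=20)[0], skip_special_tokens=True))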
| 660 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
A__ : Tuple = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : nn.ModuleList , lowerCAmelCase : nn.ModuleList , lowerCAmelCase : List[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(lowerCAmelCase ) == len(lowerCAmelCase ), F"{len(lowerCAmelCase )} != {len(lowerCAmelCase )}"
dest_layers.load_state_dict(layers_to_copy.state_dict() )
A__ : List[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
A__ : List[Any] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def a__ ( lowerCAmelCase : Dict , lowerCAmelCase : Dict ):
'''simple docstring'''
try:
UpperCAmelCase__ : Tuple = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
F" {n_student}" )
return list(range(lowerCAmelCase ) )
def a__ ( lowerCAmelCase : int , lowerCAmelCase : Tuple ):
'''simple docstring'''
if n_student > n_teacher:
raise ValueError(F"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}" )
elif n_teacher == n_student:
return list(range(lowerCAmelCase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def a__ ( lowerCAmelCase : Union[str, PreTrainedModel] , lowerCAmelCase : Union[str, Path] = "student" , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : List[str]=False , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : List[str] , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCAmelCase , lowerCAmelCase ):
AutoTokenizer.from_pretrained(lowerCAmelCase ).save_pretrained(lowerCAmelCase ) # purely for convenience
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).eval()
else:
assert isinstance(lowerCAmelCase , lowerCAmelCase ), F"teacher must be a model or string got type {type(lowerCAmelCase )}"
UpperCAmelCase__ : int = teacher.config.to_diff_dict()
try:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
UpperCAmelCase__ : Tuple = teacher_e
if d is None:
UpperCAmelCase__ : str = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
UpperCAmelCase__ : Optional[Any] = teacher_e
if d is None:
UpperCAmelCase__ : Optional[Any] = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCAmelCase )
# Copy weights
UpperCAmelCase__ : Tuple = teacher.config_class(**lowerCAmelCase )
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_config(lowerCAmelCase )
# Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
UpperCAmelCase__ : Optional[int] = student.load_state_dict(teacher.state_dict() , strict=lowerCAmelCase )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher key.
if copy_first_teacher_layers: # Our copying is done. We just log and save
UpperCAmelCase__ , UpperCAmelCase__ : int = list(range(lowerCAmelCase ) ), list(range(lowerCAmelCase ) )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
F" {save_path}" )
student.save_pretrained(lowerCAmelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
if d_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
try:
if hasattr(
lowerCAmelCase , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCAmelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCAmelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCAmelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCAmelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCAmelCase )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCAmelCase )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
UpperCAmelCase__ : int = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(lowerCAmelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
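# --- Added usage sketch (not part of the original file) ---
# Upstream this entry point is named create_student_by_copying_alternating_layers; in this
# listing the last `a__` definition above plays that role. A typical call shrinks a
# 12/12-layer BART teacher into a 6-encoder / 3-decoder student, with the kept teacher
# layers chosen from the LAYERS_TO_COPY table (checkpoint name is illustrative only):
# student, copied_enc, copied_dec = a__("facebook/bart-large-cnn", "student_dir", e=6, d=3)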
| 660 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
A__ : List[Any] = logging.get_logger(__name__)
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : str = feature_size
UpperCAmelCase__ : List[str] = sampling_rate
UpperCAmelCase__ : List[str] = padding_value
UpperCAmelCase__ : Dict = kwargs.pop("padding_side" , "right" )
UpperCAmelCase__ : Optional[int] = kwargs.pop("return_attention_mask" , __UpperCamelCase )
super().__init__(**__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , )-> BatchFeature:
# If we have a list of dicts, let's convert it to a dict of lists
# We do this to allow using this method as a collate_fn function in a PyTorch DataLoader
if isinstance(__UpperCamelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
UpperCAmelCase__ : Any = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F" to this method that includes {self.model_input_names[0]}, but you provided"
F" {list(processed_features.keys() )}" )
UpperCAmelCase__ : Optional[Any] = processed_features[self.model_input_names[0]]
UpperCAmelCase__ : List[str] = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(__UpperCamelCase ) == 0:
if return_attention_mask:
UpperCAmelCase__ : Optional[int] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
UpperCAmelCase__ : Union[str, Any] = required_input[0]
if isinstance(__UpperCamelCase , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
UpperCAmelCase__ : Tuple = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(__UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(__UpperCamelCase ):
UpperCAmelCase__ : int = "tf"
elif is_torch_tensor(__UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = "pt"
elif isinstance(__UpperCamelCase , (int, float, list, tuple, np.ndarray) ):
UpperCAmelCase__ : Optional[int] = "np"
else:
raise ValueError(
F"type of {first_element} unknown: {type(__UpperCamelCase )}. "
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
UpperCAmelCase__ : List[Any] = to_numpy(__UpperCamelCase )
else:
UpperCAmelCase__ : Tuple = [to_numpy(__UpperCamelCase ) for v in value]
# Convert padding_strategy in PaddingStrategy
UpperCAmelCase__ : str = self._get_padding_strategies(padding=__UpperCamelCase , max_length=__UpperCamelCase )
UpperCAmelCase__ : Tuple = processed_features[self.model_input_names[0]]
UpperCAmelCase__ : Optional[int] = len(__UpperCamelCase )
if not all(len(__UpperCamelCase ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
UpperCAmelCase__ : str = []
for i in range(__UpperCamelCase ):
UpperCAmelCase__ : List[str] = {k: v[i] for k, v in processed_features.items()}
# truncation
UpperCAmelCase__ : Tuple = self._truncate(
__UpperCamelCase , max_length=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , truncation=__UpperCamelCase , )
truncated_inputs.append(__UpperCamelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
UpperCAmelCase__ : List[Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
UpperCAmelCase__ : Tuple = PaddingStrategy.MAX_LENGTH
UpperCAmelCase__ : List[str] = {}
for i in range(__UpperCamelCase ):
# padding
UpperCAmelCase__ : int = self._pad(
truncated_inputs[i] , max_length=__UpperCamelCase , padding_strategy=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , )
for key, value in outputs.items():
if key not in batch_outputs:
UpperCAmelCase__ : List[str] = []
if value.dtype is np.dtype(np.floataa ):
UpperCAmelCase__ : List[str] = value.astype(np.floataa )
batch_outputs[key].append(__UpperCamelCase )
return BatchFeature(__UpperCamelCase , tensor_type=__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = PaddingStrategy.DO_NOT_PAD , __UpperCamelCase = None , __UpperCamelCase = None , )-> dict:
UpperCAmelCase__ : str = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
UpperCAmelCase__ : Dict = len(__UpperCamelCase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
UpperCAmelCase__ : Tuple = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
UpperCAmelCase__ : List[str] = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__UpperCamelCase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
UpperCAmelCase__ : List[str] = np.ones(len(__UpperCamelCase ) , dtype=np.intaa )
if needs_to_be_padded:
UpperCAmelCase__ : List[Any] = max_length - len(__UpperCamelCase )
if self.padding_side == "right":
if return_attention_mask:
UpperCAmelCase__ : Optional[Any] = np.pad(
processed_features["attention_mask"] , (0, difference) )
UpperCAmelCase__ : Any = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
UpperCAmelCase__ : List[Any] = np.pad(
__UpperCamelCase , __UpperCamelCase , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
UpperCAmelCase__ : List[str] = np.pad(
processed_features["attention_mask"] , (difference, 0) )
UpperCAmelCase__ : Dict = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
UpperCAmelCase__ : List[Any] = np.pad(
__UpperCamelCase , __UpperCamelCase , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , )-> Optional[Any]:
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
UpperCAmelCase__ : Optional[int] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
UpperCAmelCase__ : Tuple = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
UpperCAmelCase__ : Optional[Any] = len(__UpperCamelCase ) > max_length
if needs_to_be_truncated:
UpperCAmelCase__ : Dict = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
UpperCAmelCase__ : int = processed_features["attention_mask"][:max_length]
return processed_features
def lowerCAmelCase__ ( self , __UpperCamelCase=False , __UpperCamelCase=None )-> Union[str, Any]:
# Get padding strategy
if padding is not False:
if padding is True:
UpperCAmelCase__ : Dict = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : Union[str, Any] = PaddingStrategy(__UpperCamelCase )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : Tuple = padding
else:
UpperCAmelCase__ : Union[str, Any] = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
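# --- Added usage sketch (not part of the original file) ---
# A concrete extractor built on this mixin pads ragged inputs roughly as sketched below;
# the class name, the "input_values" key and the shapes are illustrative assumptions.
# fe = SomeSequenceFeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
# batch = fe.pad({"input_values": [np.zeros(800), np.zeros(1000)]}, padding=True, return_tensors="np")
# batch["input_values"].shape      # -> (2, 1000); the shorter example is right-padded with 0.0
# batch["attention_mask"][0].sum() # -> 800 when return_attention_mask is enabled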
| 660 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowercase ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self )-> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Tuple = ort.SessionOptions()
UpperCAmelCase__ : List[str] = False
return options
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : str = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : Tuple = np.random.RandomState(0 )
UpperCAmelCase__ : Any = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : Tuple = output.images
UpperCAmelCase__ : Dict = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : Union[str, Any] = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
UpperCAmelCase__ : Optional[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : List[str] = np.random.RandomState(0 )
UpperCAmelCase__ : str = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : List[str] = output.images
UpperCAmelCase__ : List[Any] = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : int = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 660 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class _lowercase :
'''simple docstring'''
_A = BlenderbotSmallConfig
_A = {}
_A = 'gelu'
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=20 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=0 , )-> str:
UpperCAmelCase__ : List[str] = parent
UpperCAmelCase__ : int = batch_size
UpperCAmelCase__ : str = seq_length
UpperCAmelCase__ : Dict = is_training
UpperCAmelCase__ : Optional[Any] = use_labels
UpperCAmelCase__ : str = vocab_size
UpperCAmelCase__ : Optional[Any] = hidden_size
UpperCAmelCase__ : Optional[int] = num_hidden_layers
UpperCAmelCase__ : Tuple = num_attention_heads
UpperCAmelCase__ : Dict = intermediate_size
UpperCAmelCase__ : str = hidden_dropout_prob
UpperCAmelCase__ : int = attention_probs_dropout_prob
UpperCAmelCase__ : Tuple = max_position_embeddings
UpperCAmelCase__ : Tuple = eos_token_id
UpperCAmelCase__ : Optional[Any] = pad_token_id
UpperCAmelCase__ : Tuple = bos_token_id
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase__ : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase__ : Dict = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase__ : List[str] = prepare_blenderbot_small_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, inputs_dict
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> Any:
UpperCAmelCase__ : Union[str, Any] = TFBlenderbotSmallModel(config=__UpperCamelCase ).get_decoder()
UpperCAmelCase__ : Any = inputs_dict["input_ids"]
UpperCAmelCase__ : Tuple = input_ids[:1, :]
UpperCAmelCase__ : str = inputs_dict["attention_mask"][:1, :]
UpperCAmelCase__ : int = inputs_dict["head_mask"]
UpperCAmelCase__ : Dict = 1
# first forward pass
UpperCAmelCase__ : str = model(__UpperCamelCase , attention_mask=__UpperCamelCase , head_mask=__UpperCamelCase , use_cache=__UpperCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase__ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase__ : Union[str, Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append the hypothetical next tokens to input_ids and attention_mask
UpperCAmelCase__ : str = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase__ : Optional[int] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase__ : Dict = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0]
UpperCAmelCase__ : List[str] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase__ : Dict = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase__ : List[str] = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase__ : Tuple = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__UpperCamelCase , __UpperCamelCase , rtol=1E-3 )
def a__ ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : Any=None , lowerCAmelCase : List[Any]=None , ):
'''simple docstring'''
if attention_mask is None:
UpperCAmelCase__ : List[str] = tf.cast(tf.math.not_equal(lowerCAmelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase__ : Tuple = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase__ : Dict = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase__ : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase__ : int = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
_A = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
_A = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
_A = True
_A = False
_A = False
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Optional[int] = TFBlenderbotSmallModelTester(self )
UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCamelCase )
@require_tokenizers
@require_tf
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
_A = [
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
_A = 'facebook/blenderbot_small-90M'
@cached_property
def lowerCAmelCase__ ( self )-> List[str]:
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
@cached_property
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : str = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Optional[Any] = self.tokenizer(self.src_text , return_tensors="tf" )
UpperCAmelCase__ : Union[str, Any] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCamelCase , )
UpperCAmelCase__ : List[str] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCamelCase )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 660 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : Optional[int] = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'table-transformer'
_A = ['past_key_values']
_A = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=3 , __UpperCamelCase=1_00 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=True , __UpperCamelCase="relu" , __UpperCamelCase=2_56 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.02 , __UpperCamelCase=1.0 , __UpperCamelCase=False , __UpperCamelCase="sine" , __UpperCamelCase="resnet50" , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=0.1 , **__UpperCamelCase , )-> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase__ : Any = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : int = backbone_config.get("model_type" )
UpperCAmelCase__ : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase__ : int = config_class.from_dict(__UpperCamelCase )
# set timm attributes to None
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = None, None, None
UpperCAmelCase__ : Optional[int] = use_timm_backbone
UpperCAmelCase__ : Dict = backbone_config
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Any = num_queries
UpperCAmelCase__ : int = d_model
UpperCAmelCase__ : Optional[int] = encoder_ffn_dim
UpperCAmelCase__ : str = encoder_layers
UpperCAmelCase__ : Dict = encoder_attention_heads
UpperCAmelCase__ : Optional[Any] = decoder_ffn_dim
UpperCAmelCase__ : Tuple = decoder_layers
UpperCAmelCase__ : Optional[Any] = decoder_attention_heads
UpperCAmelCase__ : List[str] = dropout
UpperCAmelCase__ : Tuple = attention_dropout
UpperCAmelCase__ : List[Any] = activation_dropout
UpperCAmelCase__ : Dict = activation_function
UpperCAmelCase__ : Optional[Any] = init_std
UpperCAmelCase__ : List[str] = init_xavier_std
UpperCAmelCase__ : int = encoder_layerdrop
UpperCAmelCase__ : Tuple = decoder_layerdrop
UpperCAmelCase__ : int = encoder_layers
UpperCAmelCase__ : Dict = auxiliary_loss
UpperCAmelCase__ : Union[str, Any] = position_embedding_type
UpperCAmelCase__ : List[str] = backbone
UpperCAmelCase__ : List[Any] = use_pretrained_backbone
UpperCAmelCase__ : List[str] = dilation
# Hungarian matcher
UpperCAmelCase__ : Dict = class_cost
UpperCAmelCase__ : Any = bbox_cost
UpperCAmelCase__ : Tuple = giou_cost
# Loss coefficients
UpperCAmelCase__ : Any = mask_loss_coefficient
UpperCAmelCase__ : Dict = dice_loss_coefficient
UpperCAmelCase__ : Any = bbox_loss_coefficient
UpperCAmelCase__ : Tuple = giou_loss_coefficient
UpperCAmelCase__ : List[Any] = eos_coefficient
super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase )
@property
def lowerCAmelCase__ ( self )-> int:
return self.encoder_attention_heads
@property
def lowerCAmelCase__ ( self )-> int:
return self.d_model
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = version.parse('1.11' )
@property
def lowerCAmelCase__ ( self )-> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def lowerCAmelCase__ ( self )-> float:
return 1E-5
@property
def lowerCAmelCase__ ( self )-> int:
return 12
| 660 | 1 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
A__ : Optional[int] = logging.get_logger(__name__)
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ['pixel_values']
def __init__( self , __UpperCamelCase = True , __UpperCamelCase = 1 / 2_55 , __UpperCamelCase = True , __UpperCamelCase = 8 , **__UpperCamelCase , )-> None:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : List[str] = do_rescale
UpperCAmelCase__ : List[Any] = rescale_factor
UpperCAmelCase__ : Union[str, Any] = do_pad
UpperCAmelCase__ : Tuple = pad_size
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase )-> np.ndarray:
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None )-> Optional[int]:
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = get_image_size(__UpperCamelCase )
UpperCAmelCase__ : Dict = (old_height // size + 1) * size - old_height
UpperCAmelCase__ : Optional[int] = (old_width // size + 1) * size - old_width
return pad(__UpperCamelCase , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , )-> Tuple:
UpperCAmelCase__ : str = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : Union[str, Any] = do_pad if do_pad is not None else self.do_pad
UpperCAmelCase__ : List[Any] = pad_size if pad_size is not None else self.pad_size
UpperCAmelCase__ : str = make_list_of_images(__UpperCamelCase )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
UpperCAmelCase__ : Union[str, Any] = [to_numpy_array(__UpperCamelCase ) for image in images]
if do_rescale:
UpperCAmelCase__ : List[Any] = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images]
if do_pad:
UpperCAmelCase__ : Optional[int] = [self.pad(__UpperCamelCase , size=__UpperCamelCase ) for image in images]
UpperCAmelCase__ : Dict = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]
UpperCAmelCase__ : List[str] = {"pixel_values": images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
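# --- Added note (not part of the original file) ---
# The pad step rounds each spatial side up with (old // size + 1) * size, so a 500x500 input
# with pad_size=8 becomes 504x504, and a side that is already a multiple of 8 (e.g. 512)
# still gains one full extra block (520) as the formula is written. A quick arithmetic check:
assert (500 // 8 + 1) * 8 == 504
assert (512 // 8 + 1) * 8 == 520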
| 660 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
A__ : int = getLogger(__name__)
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : int = 8 , lowerCAmelCase : int = 1024 , lowerCAmelCase : List[Any]="val" , lowerCAmelCase : str=None , lowerCAmelCase : int=False , lowerCAmelCase : Dict="summarization" , lowerCAmelCase : int=None , lowerCAmelCase : List[str]=1 , lowerCAmelCase : Dict = None , lowerCAmelCase : List[str]="" , **lowerCAmelCase : int , ):
'''simple docstring'''
UpperCAmelCase__ : Dict = str(lowerCAmelCase )
assert local_rank is not None
torch.distributed.init_process_group(backend="nccl" , rank=lowerCAmelCase )
UpperCAmelCase__ : List[str] = Path(lowerCAmelCase )
UpperCAmelCase__ : str = save_dir.joinpath(F"rank_{local_rank}_output.json" )
torch.cuda.set_device(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).cuda()
if fpaa:
UpperCAmelCase__ : List[Any] = model.half()
# determine if we need to increase num_beams
use_task_specific_params(lowerCAmelCase , lowerCAmelCase ) # update config with task specific params
UpperCAmelCase__ : List[Any] = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
UpperCAmelCase__ : Any = num_return_sequences
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
if max_source_length is None:
UpperCAmelCase__ : int = tokenizer.model_max_length
if prefix is None:
UpperCAmelCase__ : Union[str, Any] = prefix or getattr(model.config , "prefix" , "" ) or ""
UpperCAmelCase__ : str = SeqaSeqDataset(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , max_target_length=1024 , type_path=lowerCAmelCase , n_obs=lowerCAmelCase , prefix=lowerCAmelCase , **lowerCAmelCase , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
UpperCAmelCase__ : Union[str, Any] = ds.make_sortish_sampler(lowerCAmelCase , distributed=lowerCAmelCase , add_extra_examples=lowerCAmelCase , shuffle=lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = DataLoader(lowerCAmelCase , sampler=lowerCAmelCase , batch_size=lowerCAmelCase , collate_fn=ds.collate_fn )
UpperCAmelCase__ : str = []
for batch in tqdm(lowerCAmelCase ):
UpperCAmelCase__ : Dict = model.generate(
input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=lowerCAmelCase , num_beams=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase__ : int = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase )
UpperCAmelCase__ : int = batch["ids"]
if num_return_sequences > 1:
UpperCAmelCase__ : str = chunks(lowerCAmelCase , lowerCAmelCase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(lowerCAmelCase ):
results.append({"pred": pred, "id": ids[i].item()} )
save_json(lowerCAmelCase , lowerCAmelCase )
return results, sampler.num_replicas
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : str = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" , type=lowerCAmelCase , help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" , type=lowerCAmelCase , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
parser.add_argument("--save_dir" , type=lowerCAmelCase , help="where to save" , default="tmp_gen" )
parser.add_argument("--max_source_length" , type=lowerCAmelCase , default=lowerCAmelCase )
parser.add_argument(
"--type_path" , type=lowerCAmelCase , default="test" , help="which subset to evaluate typically train/val/test" )
parser.add_argument("--task" , type=lowerCAmelCase , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=lowerCAmelCase , default=8 , required=lowerCAmelCase , help="batch size" )
parser.add_argument(
"--local_rank" , type=lowerCAmelCase , default=-1 , required=lowerCAmelCase , help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase , help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" , type=lowerCAmelCase , default=1 , required=lowerCAmelCase , help="How many sequences to return" )
parser.add_argument(
"--sync_timeout" , type=lowerCAmelCase , default=600 , required=lowerCAmelCase , help="How long should master process wait for other processes to finish." , )
parser.add_argument("--src_lang" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
parser.add_argument("--tgt_lang" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
parser.add_argument(
"--prefix" , type=lowerCAmelCase , required=lowerCAmelCase , default=lowerCAmelCase , help="will be added to the begininng of src examples" )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--debug" , action="store_true" )
UpperCAmelCase__ : Optional[int] = time.time()
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = parser.parse_known_args()
UpperCAmelCase__ : int = parse_numeric_n_bool_cl_kwargs(lowerCAmelCase )
if generate_kwargs and args.local_rank <= 0:
print(F"parsed the following generate kwargs: {generate_kwargs}" )
UpperCAmelCase__ : Dict = Path(args.save_dir + "_tmp" )
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase ) # this handles locking.
UpperCAmelCase__ : List[str] = list(json_save_dir.glob("rank_*.json" ) )
if intermediate_files:
raise ValueError(F"Found files at {json_save_dir} please move or remove them." )
# In theory, a node could finish and save before another node hits this. If this happens, we can address it later.
UpperCAmelCase__ : List[str] = {}
if args.src_lang is not None:
UpperCAmelCase__ : str = args.src_lang
if args.tgt_lang is not None:
UpperCAmelCase__ : List[str] = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = eval_data_dir(
args.data_dir , lowerCAmelCase , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=lowerCAmelCase , **lowerCAmelCase , )
if args.local_rank <= 0:
UpperCAmelCase__ : str = Path(args.save_dir )
save_dir.mkdir(exist_ok=lowerCAmelCase )
UpperCAmelCase__ : Tuple = gather_results_from_each_node(lowerCAmelCase , lowerCAmelCase , args.sync_timeout )
UpperCAmelCase__ : Union[str, Any] = combine_partial_results(lowerCAmelCase )
if args.num_return_sequences > 1:
UpperCAmelCase__ : int = save_dir.joinpath("pseudolabel_results.json" )
print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
save_json(lowerCAmelCase , lowerCAmelCase )
return
UpperCAmelCase__ : Optional[Any] = Path(args.data_dir ).joinpath(args.type_path + ".target" )
with open(lowerCAmelCase ) as f:
UpperCAmelCase__ : Optional[int] = [x.rstrip() for x in f.readlines()][: len(lowerCAmelCase )]
# Calculate metrics, save metrics, and save _generations.txt
UpperCAmelCase__ : List[Any] = "translation" in args.task
UpperCAmelCase__ : Optional[Any] = calculate_bleu if calc_bleu else calculate_rouge
UpperCAmelCase__ : Optional[Any] = "bleu" if calc_bleu else "rouge"
UpperCAmelCase__ : Dict = score_fn(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : List[Any] = len(lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = time.time() - start_time
UpperCAmelCase__ : Optional[int] = round(runtime / metrics["n_obs"] , 4 )
UpperCAmelCase__ : Tuple = num_replicas
# TODO(@stas00): add whatever metadata to metrics
UpperCAmelCase__ : Any = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" )
save_json(lowerCAmelCase , lowerCAmelCase , indent=lowerCAmelCase )
print(lowerCAmelCase )
write_txt_file(lowerCAmelCase , save_dir.joinpath(F"{args.type_path}_generations.txt" ) )
if args.debug:
write_txt_file(lowerCAmelCase , save_dir.joinpath(F"{args.type_path}.target" ) )
else:
shutil.rmtree(lowerCAmelCase )
def a__ ( lowerCAmelCase : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : str = []
for partial_result in partial_results:
records.extend(lowerCAmelCase )
UpperCAmelCase__ : Dict = sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x["id"] )
UpperCAmelCase__ : List[str] = [x["pred"] for x in records]
return preds
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
'''simple docstring'''
# WAIT FOR lots of .json files
UpperCAmelCase__ : int = time.time()
logger.info("waiting for all nodes to finish" )
UpperCAmelCase__ : Dict = None
while (time.time() - start_wait) < timeout:
UpperCAmelCase__ : str = list(save_dir.glob("rank_*.json" ) )
if len(lowerCAmelCase ) < num_replicas:
continue
try:
# make sure all json files are fully saved
UpperCAmelCase__ : Union[str, Any] = lmap(lowerCAmelCase , lowerCAmelCase )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
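# --- Added usage sketch (not part of the original file) ---
# The script is meant to be launched once per GPU; the file name and checkpoint below are assumptions:
# python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#     --model_name sshleifer/distilbart-xsum-12-3 --data_dir xsum --save_dir gen_out \
#     --bs 16 --fp16 --type_path test
# Each rank writes rank_<i>_output.json; rank 0 then gathers the shards, computes ROUGE/BLEU
# and writes <type_path>_generations.txt plus the metrics json under --save_dir.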
| 660 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
A__ : int = None
A__ : List[str] = logging.get_logger(__name__)
A__ : Optional[int] = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
A__ : Union[str, Any] = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
},
}
A__ : int = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
A__ : Any = """▁"""
# Segments (not really needed)
A__ : str = 0
A__ : int = 1
A__ : Union[str, Any] = 2
A__ : List[str] = 3
A__ : Optional[Any] = 4
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = 'left'
_A = XLNetTokenizer
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<sep>" , __UpperCamelCase="<pad>" , __UpperCamelCase="<cls>" , __UpperCamelCase="<mask>" , __UpperCamelCase=["<eop>", "<eod>"] , **__UpperCamelCase , )-> Any:
# The mask token behaves like a normal word, i.e. it includes the space before it
UpperCAmelCase__ : Dict = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
super().__init__(
vocab_file=__UpperCamelCase , tokenizer_file=__UpperCamelCase , do_lower_case=__UpperCamelCase , remove_space=__UpperCamelCase , keep_accents=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , pad_token=__UpperCamelCase , cls_token=__UpperCamelCase , mask_token=__UpperCamelCase , additional_special_tokens=__UpperCamelCase , **__UpperCamelCase , )
UpperCAmelCase__ : Any = 3
UpperCAmelCase__ : Union[str, Any] = do_lower_case
UpperCAmelCase__ : List[str] = remove_space
UpperCAmelCase__ : Union[str, Any] = keep_accents
UpperCAmelCase__ : List[Any] = vocab_file
UpperCAmelCase__ : Tuple = False if not self.vocab_file else True
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None )-> List[int]:
UpperCAmelCase__ : List[Any] = [self.sep_token_id]
UpperCAmelCase__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None )-> List[int]:
UpperCAmelCase__ : str = [self.sep_token_id]
UpperCAmelCase__ : int = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None )-> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__UpperCamelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase__ : Dict = os.path.join(
__UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ):
copyfile(self.vocab_file , __UpperCamelCase )
return (out_vocab_file,)
| 660 |
"""simple docstring"""
from timeit import timeit
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCAmelCase__ : Tuple = 0
while number:
number &= number - 1
result += 1
return result
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCAmelCase__ : Union[str, Any] = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def a__ ( ):
'''simple docstring'''
def do_benchmark(lowerCAmelCase : int ) -> None:
UpperCAmelCase__ : Dict = "import __main__ as z"
print(F"Benchmark when {number = }:" )
print(F"{get_set_bits_count_using_modulo_operator(lowerCAmelCase ) = }" )
UpperCAmelCase__ : Tuple = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=lowerCAmelCase )
print(F"timeit() runs in {timing} seconds" )
print(F"{get_set_bits_count_using_brian_kernighans_algorithm(lowerCAmelCase ) = }" )
UpperCAmelCase__ : Any = timeit(
"z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=lowerCAmelCase , )
print(F"timeit() runs in {timing} seconds" )
for number in (25, 37, 58, 0):
do_benchmark(lowerCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
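# --- Added sanity check (not part of the original file) ---
# Both counters above compute the population count; the expected set-bit counts for the
# benchmark inputs, cross-checked against Python's binary representation:
assert bin(25).count("1") == 3  # 25 = 0b11001
assert bin(37).count("1") == 3  # 37 = 0b100101
assert bin(58).count("1") == 4  # 58 = 0b111010
assert bin(0).count("1") == 0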
| 660 | 1 |
"""simple docstring"""
from __future__ import annotations
A__ : Optional[Any] = list[list[int]]
# assigning initial values to the grid
A__ : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
A__ : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def a__ ( lowerCAmelCase : Matrix , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int ):
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def a__ ( lowerCAmelCase : Matrix ):
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def a__ ( lowerCAmelCase : Matrix ):
'''simple docstring'''
if location := find_empty_location(lowerCAmelCase ):
UpperCAmelCase__ , UpperCAmelCase__ : str = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : str = digit
if sudoku(lowerCAmelCase ) is not None:
return grid
UpperCAmelCase__ : Optional[int] = 0
return None
def a__ ( lowerCAmelCase : Matrix ):
'''simple docstring'''
for row in grid:
for cell in row:
print(lowerCAmelCase , end=" " )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
A__ : int = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 660 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _lowercase ( unittest.TestCase , lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Optional[Any] = load_tool("text-classification" )
self.tool.setup()
UpperCAmelCase__ : List[str] = load_tool("text-classification" , remote=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Dict = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : List[Any] = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Any = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : str = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
| 660 | 1 |
"""simple docstring"""
def a__ ( lowerCAmelCase : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = [0] * len(lowerCAmelCase )
UpperCAmelCase__ : Any = []
UpperCAmelCase__ : str = [1] * len(lowerCAmelCase )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(lowerCAmelCase ) ):
if indegree[i] == 0:
queue.append(lowerCAmelCase )
while queue:
UpperCAmelCase__ : Union[str, Any] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
UpperCAmelCase__ : Dict = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(lowerCAmelCase )
print(max(lowerCAmelCase ) )
# Adjacency list of Graph
A__ : Optional[Any] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
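# --- Added note (not part of the original file) ---
# For the adjacency list above the longest vertex chain is 0 -> 2 -> 5 -> 6 -> 7, so the
# Kahn-style relaxation prints 5 (distances count vertices and start at 1).
# An independent cross-check with a memoised DFS over the same graph:
from functools import lru_cache
_g = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
@lru_cache(maxsize=None)
def _longest_from(v: int) -> int:
    return 1 + max((_longest_from(u) for u in _g[v]), default=0)
assert max(_longest_from(v) for v in _g) == 5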
| 660 |
"""simple docstring"""
def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ):
'''simple docstring'''
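    # Manhattan (taxicab) distance: sum of absolute coordinate differences,
    # e.g. [1, 1] and [2, 2] -> |1 - 2| + |1 - 2| = 2.0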
_validate_point(lowerCAmelCase )
_validate_point(lowerCAmelCase )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(lowerCAmelCase , lowerCAmelCase ) ) )
def a__ ( lowerCAmelCase : list[float] ):
'''simple docstring'''
if point:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
for item in point:
if not isinstance(lowerCAmelCase , (int, float) ):
UpperCAmelCase__ : Tuple = (
"Expected a list of numbers as input, found "
F"{type(lowerCAmelCase ).__name__}"
)
raise TypeError(lowerCAmelCase )
else:
UpperCAmelCase__ : Dict = F"Expected a list of numbers as input, found {type(lowerCAmelCase ).__name__}"
raise TypeError(lowerCAmelCase )
else:
raise ValueError("Missing an input" )
def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ):
'''simple docstring'''
_validate_point(lowerCAmelCase )
_validate_point(lowerCAmelCase )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(lowerCAmelCase , lowerCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 1 |
"""simple docstring"""
from manim import *
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : str = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase__ : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Union[str, Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[Any] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : int = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = VGroup(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("CPU" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(4 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Union[str, Any] = Text("GPU" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Model" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
rect.set_stroke(__UpperCamelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCAmelCase__ : int = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__UpperCamelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__UpperCamelCase , buff=0.0 )
self.add(__UpperCamelCase )
cpu_targs.append(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Any = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Loaded Checkpoint" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , aligned_edge=__UpperCamelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase__ : Any = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : str = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__UpperCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCAmelCase__ : Optional[Any] = MarkupText(
F"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCamelCase ) , Write(__UpperCamelCase ) )
self.play(Write(__UpperCamelCase , run_time=1 ) , Create(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = fill.copy().set_fill(__UpperCamelCase , opacity=0.7 )
target.move_to(__UpperCamelCase )
first_animations.append(GrowFromCenter(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : List[str] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__UpperCamelCase , run_time=1.5 ) )
self.play(*__UpperCamelCase )
self.play(*__UpperCamelCase )
self.wait()
| 660 |
"""simple docstring"""
import math
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
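    # (such p are congruent to 1 or 5 mod 6, so it suffices to test divisors
    # 6k - 1 and 6k + 1 up to sqrt(number))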
for i in range(5 , int(math.sqrt(lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a__ ( lowerCAmelCase : int = 1_0001 ):
'''simple docstring'''
try:
UpperCAmelCase__ : List[str] = int(lowerCAmelCase )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
UpperCAmelCase__ : list[int] = []
UpperCAmelCase__ : str = 2
while len(lowerCAmelCase ) < nth:
if is_prime(lowerCAmelCase ):
primes.append(lowerCAmelCase )
num += 1
else:
num += 1
return primes[len(lowerCAmelCase ) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowercase ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self )-> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Tuple = ort.SessionOptions()
UpperCAmelCase__ : List[str] = False
return options
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : str = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : Tuple = np.random.RandomState(0 )
UpperCAmelCase__ : Any = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : Tuple = output.images
UpperCAmelCase__ : Dict = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : Union[str, Any] = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
UpperCAmelCase__ : Optional[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : List[str] = np.random.RandomState(0 )
UpperCAmelCase__ : str = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : List[str] = output.images
UpperCAmelCase__ : List[Any] = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : int = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 660 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=30 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=10 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=0.6 , __UpperCamelCase=None , )-> List[Any]:
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Optional[Any] = batch_size
UpperCAmelCase__ : Any = image_size
UpperCAmelCase__ : Dict = patch_size
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Union[str, Any] = is_training
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : int = mask_ratio
UpperCAmelCase__ : Tuple = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCAmelCase__ : int = (image_size // patch_size) ** 2
UpperCAmelCase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
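        # e.g. image_size=30, patch_size=2, mask_ratio=0.6 -> 225 patches and
        # seq_length = ceil(0.4 * 226) = 91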
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self )-> int:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = ViTMAEModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : List[Any] = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : Optional[int] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCAmelCase__ : List[str] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase__ : Dict = 1
UpperCAmelCase__ : str = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = config_and_inputs
UpperCAmelCase__ : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_A = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
_A = False
_A = False
_A = False
_A = False
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Any = ViTMAEModelTester(self )
UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def lowerCAmelCase__ ( self )-> int:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def lowerCAmelCase__ ( self )-> Dict:
pass
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class(__UpperCamelCase )
UpperCAmelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Dict = [*signature.parameters.keys()]
UpperCAmelCase__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Dict:
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCAmelCase__ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase__ : str = torch.from_numpy(__UpperCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase__ : Optional[Any] = pt_noise
super().check_pt_tf_models(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[Any] = outputs[0].cpu().numpy()
UpperCAmelCase__ : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = model_class.from_pretrained(__UpperCamelCase )
model.to(__UpperCamelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
# Make sure we don't have nans
UpperCAmelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__UpperCamelCase , 1E-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> List[str]:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Any:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Optional[Any]:
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def lowerCAmelCase__ ( self )-> List[Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
pass
@slow
def lowerCAmelCase__ ( self )-> Union[str, Any]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = ViTMAEModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self )-> List[Any]:
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self )-> Optional[int]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCAmelCase__ : Any = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__UpperCamelCase )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : Optional[Any] = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase__ : List[Any] = ViTMAEConfig()
UpperCAmelCase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase__ : Optional[int] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**__UpperCamelCase , noise=torch.from_numpy(__UpperCamelCase ).to(device=__UpperCamelCase ) )
# verify the logits
UpperCAmelCase__ : str = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__UpperCamelCase ) , atol=1E-4 ) )
| 660 | 1 |
"""simple docstring"""
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
A__ : Any = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
A__ : int = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def a__ ( lowerCAmelCase : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = numpy.dtype(numpy.uintaa ).newbyteorder(">" )
return numpy.frombuffer(bytestream.read(4 ) , dtype=lowerCAmelCase )[0]
@deprecated(lowerCAmelCase , "Please use tf.data to implement this functionality." )
def a__ ( lowerCAmelCase : List[str] ):
'''simple docstring'''
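    # MNIST IDX image layout: 32-bit magic number (2051), image count, rows,
    # cols, then the raw uint8 pixel data.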
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=lowerCAmelCase ) as bytestream:
UpperCAmelCase__ : Optional[int] = _readaa(lowerCAmelCase )
if magic != 2051:
raise ValueError(
"Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
UpperCAmelCase__ : Any = _readaa(lowerCAmelCase )
UpperCAmelCase__ : int = _readaa(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = _readaa(lowerCAmelCase )
UpperCAmelCase__ : Tuple = bytestream.read(rows * cols * num_images )
UpperCAmelCase__ : Dict = numpy.frombuffer(lowerCAmelCase , dtype=numpy.uinta )
UpperCAmelCase__ : List[Any] = data.reshape(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , 1 )
return data
@deprecated(lowerCAmelCase , "Please use tf.one_hot on tensors." )
def a__ ( lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
'''simple docstring'''
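    # One-hot encoding, e.g. with num_classes=10 the scalar label 3 becomes
    # [0, 0, 0, 1, 0, 0, 0, 0, 0, 0].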
UpperCAmelCase__ : Optional[int] = labels_dense.shape[0]
UpperCAmelCase__ : str = numpy.arange(lowerCAmelCase ) * num_classes
UpperCAmelCase__ : Union[str, Any] = numpy.zeros((num_labels, num_classes) )
UpperCAmelCase__ : str = 1
return labels_one_hot
@deprecated(lowerCAmelCase , "Please use tf.data to implement this functionality." )
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any]=False , lowerCAmelCase : List[str]=10 ):
'''simple docstring'''
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=lowerCAmelCase ) as bytestream:
UpperCAmelCase__ : Union[str, Any] = _readaa(lowerCAmelCase )
if magic != 2049:
raise ValueError(
"Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
UpperCAmelCase__ : List[str] = _readaa(lowerCAmelCase )
UpperCAmelCase__ : Dict = bytestream.read(lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = numpy.frombuffer(lowerCAmelCase , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(lowerCAmelCase , lowerCAmelCase )
return labels
class _lowercase :
'''simple docstring'''
@deprecated(
__UpperCamelCase , "Please use alternatives such as official/mnist/_DataSet.py"
" from tensorflow/models." , )
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=dtypes.floataa , __UpperCamelCase=True , __UpperCamelCase=None , )-> List[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : int = random_seed.get_seed(__UpperCamelCase )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
UpperCAmelCase__ : Tuple = dtypes.as_dtype(__UpperCamelCase ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype )
if fake_data:
UpperCAmelCase__ : List[str] = 1_00_00
UpperCAmelCase__ : Tuple = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), F"images.shape: {images.shape} labels.shape: {labels.shape}"
UpperCAmelCase__ : str = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
UpperCAmelCase__ : Optional[Any] = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
UpperCAmelCase__ : Dict = images.astype(numpy.floataa )
UpperCAmelCase__ : Dict = numpy.multiply(__UpperCamelCase , 1.0 / 255.0 )
UpperCAmelCase__ : Union[str, Any] = images
UpperCAmelCase__ : Optional[Any] = labels
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Tuple = 0
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return self._images
@property
def lowerCAmelCase__ ( self )-> str:
return self._labels
@property
def lowerCAmelCase__ ( self )-> Tuple:
return self._num_examples
@property
def lowerCAmelCase__ ( self )-> Union[str, Any]:
return self._epochs_completed
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=True )-> Any:
if fake_data:
UpperCAmelCase__ : Dict = [1] * 7_84
UpperCAmelCase__ : str = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(__UpperCamelCase )],
[fake_label for _ in range(__UpperCamelCase )],
)
UpperCAmelCase__ : Dict = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
UpperCAmelCase__ : str = numpy.arange(self._num_examples )
numpy.random.shuffle(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = self.images[perma]
UpperCAmelCase__ : str = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
UpperCAmelCase__ : str = self._num_examples - start
UpperCAmelCase__ : int = self._images[start : self._num_examples]
UpperCAmelCase__ : Optional[int] = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
UpperCAmelCase__ : Any = numpy.arange(self._num_examples )
numpy.random.shuffle(__UpperCamelCase )
UpperCAmelCase__ : int = self.images[perm]
UpperCAmelCase__ : Optional[int] = self.labels[perm]
# Start next epoch
UpperCAmelCase__ : Optional[Any] = 0
UpperCAmelCase__ : List[Any] = batch_size - rest_num_examples
UpperCAmelCase__ : List[Any] = self._index_in_epoch
UpperCAmelCase__ : int = self._images[start:end]
UpperCAmelCase__ : int = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
UpperCAmelCase__ : List[str] = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(lowerCAmelCase , "Please write your own downloading logic." )
def a__ ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict ):
'''simple docstring'''
if not gfile.Exists(lowerCAmelCase ):
gfile.MakeDirs(lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = os.path.join(lowerCAmelCase , lowerCAmelCase )
if not gfile.Exists(lowerCAmelCase ):
urllib.request.urlretrieve(lowerCAmelCase , lowerCAmelCase ) # noqa: S310
with gfile.GFile(lowerCAmelCase ) as f:
UpperCAmelCase__ : str = f.size()
print("Successfully downloaded" , lowerCAmelCase , lowerCAmelCase , "bytes." )
return filepath
@deprecated(
lowerCAmelCase , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" )
def a__ ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str]=False , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : str=dtypes.floataa , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Tuple=5000 , lowerCAmelCase : Any=None , lowerCAmelCase : Tuple=DEFAULT_SOURCE_URL , ):
'''simple docstring'''
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=lowerCAmelCase , one_hot=lowerCAmelCase , dtype=lowerCAmelCase , seed=lowerCAmelCase )
UpperCAmelCase__ : Any = fake()
UpperCAmelCase__ : Union[str, Any] = fake()
UpperCAmelCase__ : Tuple = fake()
return _Datasets(train=lowerCAmelCase , validation=lowerCAmelCase , test=lowerCAmelCase )
if not source_url: # empty string check
UpperCAmelCase__ : List[Any] = DEFAULT_SOURCE_URL
UpperCAmelCase__ : Optional[int] = "train-images-idx3-ubyte.gz"
UpperCAmelCase__ : Optional[int] = "train-labels-idx1-ubyte.gz"
UpperCAmelCase__ : Dict = "t10k-images-idx3-ubyte.gz"
UpperCAmelCase__ : Any = "t10k-labels-idx1-ubyte.gz"
UpperCAmelCase__ : str = _maybe_download(
lowerCAmelCase , lowerCAmelCase , source_url + train_images_file )
with gfile.Open(lowerCAmelCase , "rb" ) as f:
UpperCAmelCase__ : int = _extract_images(lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = _maybe_download(
lowerCAmelCase , lowerCAmelCase , source_url + train_labels_file )
with gfile.Open(lowerCAmelCase , "rb" ) as f:
UpperCAmelCase__ : List[str] = _extract_labels(lowerCAmelCase , one_hot=lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = _maybe_download(
lowerCAmelCase , lowerCAmelCase , source_url + test_images_file )
with gfile.Open(lowerCAmelCase , "rb" ) as f:
UpperCAmelCase__ : List[str] = _extract_images(lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = _maybe_download(
lowerCAmelCase , lowerCAmelCase , source_url + test_labels_file )
with gfile.Open(lowerCAmelCase , "rb" ) as f:
UpperCAmelCase__ : List[Any] = _extract_labels(lowerCAmelCase , one_hot=lowerCAmelCase )
if not 0 <= validation_size <= len(lowerCAmelCase ):
UpperCAmelCase__ : Optional[int] = (
"Validation size should be between 0 and "
F"{len(lowerCAmelCase )}. Received: {validation_size}."
)
raise ValueError(lowerCAmelCase )
UpperCAmelCase__ : str = train_images[:validation_size]
UpperCAmelCase__ : Any = train_labels[:validation_size]
UpperCAmelCase__ : Optional[int] = train_images[validation_size:]
UpperCAmelCase__ : Dict = train_labels[validation_size:]
UpperCAmelCase__ : Optional[int] = {"dtype": dtype, "reshape": reshape, "seed": seed}
UpperCAmelCase__ : Optional[Any] = _DataSet(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = _DataSet(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase__ : Any = _DataSet(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
return _Datasets(train=lowerCAmelCase , validation=lowerCAmelCase , test=lowerCAmelCase )
| 660 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _lowercase :
'''simple docstring'''
_A = 42
# setable values
_A = 42
_A = 42
_A = None
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
return cls(common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase )
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
_A = [e.name for e in FlaxKarrasDiffusionSchedulers]
_A = 42
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return True
@register_to_config
def __init__( self , __UpperCamelCase = 10_00 , __UpperCamelCase = 0.0001 , __UpperCamelCase = 0.02 , __UpperCamelCase = "linear" , __UpperCamelCase = None , __UpperCamelCase = "fixed_small" , __UpperCamelCase = True , __UpperCamelCase = "epsilon" , __UpperCamelCase = jnp.floataa , )-> List[str]:
UpperCAmelCase__ : int = dtype
def lowerCAmelCase__ ( self , __UpperCamelCase = None )-> DDPMSchedulerState:
if common is None:
UpperCAmelCase__ : int = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype )
UpperCAmelCase__ : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None )-> jnp.ndarray:
return sample
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = () )-> DDPMSchedulerState:
UpperCAmelCase__ : Dict = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
UpperCAmelCase__ : Optional[int] = (jnp.arange(0 , __UpperCamelCase ) * step_ratio).round()[::-1]
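        # e.g. num_train_timesteps=1000 and num_inference_steps=50 give
        # step_ratio=20 and timesteps [980, 960, ..., 20, 0]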
return state.replace(
num_inference_steps=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase__ : Any = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
UpperCAmelCase__ : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCAmelCase__ : Dict = jnp.clip(__UpperCamelCase , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCAmelCase__ : Tuple = jnp.log(jnp.clip(__UpperCamelCase , a_min=1E-20 ) )
elif variance_type == "fixed_large":
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCAmelCase__ : List[str] = variance
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
UpperCAmelCase__ : Optional[int] = (predicted_variance + 1) / 2
UpperCAmelCase__ : Any = frac * max_log + (1 - frac) * min_log
return variance
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True , )-> Union[FlaxDDPMSchedulerOutput, Tuple]:
UpperCAmelCase__ : List[str] = timestep
if key is None:
UpperCAmelCase__ : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = jnp.split(__UpperCamelCase , sample.shape[1] , axis=1 )
else:
UpperCAmelCase__ : Optional[Any] = None
# 1. compute alphas, betas
UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCAmelCase__ : Union[str, Any] = 1 - alpha_prod_t
UpperCAmelCase__ : Tuple = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ : Any = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ : List[Any] = jnp.clip(__UpperCamelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase__ : List[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
UpperCAmelCase__ : Any = jax.random.split(__UpperCamelCase , num=1 )
UpperCAmelCase__ : int = jax.random.normal(__UpperCamelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__UpperCamelCase , __UpperCamelCase , predicted_variance=__UpperCamelCase ) ** 0.5) * noise
UpperCAmelCase__ : Dict = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCAmelCase__ : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__UpperCamelCase , state=__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return add_noise_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return get_velocity_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __len__( self )-> Tuple:
return self.config.num_train_timesteps
| 660 | 1 |
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def a__ ( lowerCAmelCase : Sequence[float] , lowerCAmelCase : int , lowerCAmelCase : int ):
'''simple docstring'''
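    # Divide and conquer: the maximum subarray lies entirely in the left half,
    # entirely in the right half, or crosses the midpoint; return the best of the three.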
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
UpperCAmelCase__ : Optional[Any] = (low + high) // 2
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = max_subarray(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = max_subarray(lowerCAmelCase , mid + 1 , lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = max_cross_sum(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def a__ ( lowerCAmelCase : Sequence[float] , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = float("-inf" ), -1
UpperCAmelCase__ , UpperCAmelCase__ : Any = float("-inf" ), -1
UpperCAmelCase__ : int | float = 0
for i in range(lowerCAmelCase , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
UpperCAmelCase__ : Optional[int] = summ
UpperCAmelCase__ : str = i
UpperCAmelCase__ : Any = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
UpperCAmelCase__ : Optional[Any] = summ
UpperCAmelCase__ : int = i
return max_left, max_right, (left_sum + right_sum)
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = [randint(1 , lowerCAmelCase ) for _ in range(lowerCAmelCase )]
UpperCAmelCase__ : List[str] = time.time()
max_subarray(lowerCAmelCase , 0 , input_size - 1 )
UpperCAmelCase__ : Optional[Any] = time.time()
return end - start
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = [10, 100, 1000, 1_0000, 5_0000, 10_0000, 20_0000, 30_0000, 40_0000, 50_0000]
UpperCAmelCase__ : List[Any] = [time_max_subarray(lowerCAmelCase ) for input_size in input_sizes]
print("No of Inputs\t\tTime Taken" )
for input_size, runtime in zip(lowerCAmelCase , lowerCAmelCase ):
print(lowerCAmelCase , "\t\t" , lowerCAmelCase )
plt.plot(lowerCAmelCase , lowerCAmelCase )
plt.xlabel("Number of Inputs" )
plt.ylabel("Time taken in seconds" )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 660 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ''
_A = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> str:
super().__init__(self , **__UpperCamelCase )
UpperCAmelCase__ : int = repo_info
UpperCAmelCase__ : Optional[int] = token
UpperCAmelCase__ : Optional[Any] = None
def lowerCAmelCase__ ( self )-> Optional[Any]:
if self.dir_cache is None:
UpperCAmelCase__ : str = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCAmelCase__ : str = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"name": str(__UpperCamelCase ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , )-> List[Any]:
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
UpperCAmelCase__ : Union[str, Any] = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def lowerCAmelCase__ ( self , __UpperCamelCase , **__UpperCamelCase )-> List[str]:
self._get_dirs()
UpperCAmelCase__ : Union[str, Any] = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase )-> str:
self._get_dirs()
UpperCAmelCase__ : str = PurePosixPath(path.strip("/" ) )
UpperCAmelCase__ : Optional[Any] = {}
for p, f in self.dir_cache.items():
UpperCAmelCase__ : Optional[int] = PurePosixPath(p.strip("/" ) )
UpperCAmelCase__ : Dict = p.parent
if root == path:
UpperCAmelCase__ : Tuple = f
UpperCAmelCase__ : List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
| 660 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
A__ : Any = {
"""sample_size""": 32,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [32, 64],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
A__ : List[str] = {
"""sample_size""": 64,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_000,
"""block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
A__ : Optional[int] = {
"""sample_size""": 256,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
A__ : Dict = {
"""num_train_timesteps""": 40,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
A__ : Dict = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
A__ : Optional[int] = {
"""num_train_timesteps""": 151,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
def a__ ( lowerCAmelCase : Dict ):
'''simple docstring'''
if isinstance(lowerCAmelCase , lowerCAmelCase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("boolean value expected" )
def a__ ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple=False ):
'''simple docstring'''
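    # Copy a resnet block's in_layers / emb_layers / out_layers (and optional
    # skip_connection) weights from the original checkpoint into the new state dict.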
UpperCAmelCase__ : Tuple = checkpoint[F"{old_prefix}.in_layers.0.weight"]
UpperCAmelCase__ : Optional[Any] = checkpoint[F"{old_prefix}.in_layers.0.bias"]
UpperCAmelCase__ : str = checkpoint[F"{old_prefix}.in_layers.2.weight"]
UpperCAmelCase__ : int = checkpoint[F"{old_prefix}.in_layers.2.bias"]
UpperCAmelCase__ : Optional[int] = checkpoint[F"{old_prefix}.emb_layers.1.weight"]
UpperCAmelCase__ : List[str] = checkpoint[F"{old_prefix}.emb_layers.1.bias"]
UpperCAmelCase__ : Dict = checkpoint[F"{old_prefix}.out_layers.0.weight"]
UpperCAmelCase__ : Tuple = checkpoint[F"{old_prefix}.out_layers.0.bias"]
UpperCAmelCase__ : str = checkpoint[F"{old_prefix}.out_layers.3.weight"]
UpperCAmelCase__ : Optional[Any] = checkpoint[F"{old_prefix}.out_layers.3.bias"]
if has_skip:
UpperCAmelCase__ : int = checkpoint[F"{old_prefix}.skip_connection.weight"]
UpperCAmelCase__ : str = checkpoint[F"{old_prefix}.skip_connection.bias"]
return new_checkpoint
def a__ ( lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=None ):
'''simple docstring'''
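    # Split the fused qkv projection into separate q, k, v tensors and squeeze the
    # trailing 1x1 conv dimensions into plain 2-D weight matrices.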
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = checkpoint[F"{old_prefix}.qkv.weight"].chunk(3 , dim=0 )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = checkpoint[F"{old_prefix}.qkv.bias"].chunk(3 , dim=0 )
UpperCAmelCase__ : int = checkpoint[F"{old_prefix}.norm.weight"]
UpperCAmelCase__ : Any = checkpoint[F"{old_prefix}.norm.bias"]
UpperCAmelCase__ : int = weight_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase__ : Union[str, Any] = bias_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase__ : Union[str, Any] = weight_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase__ : Optional[Any] = bias_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase__ : int = weight_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase__ : Union[str, Any] = bias_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase__ : int = (
checkpoint[F"{old_prefix}.proj_out.weight"].squeeze(-1 ).squeeze(-1 )
)
UpperCAmelCase__ : Optional[Any] = checkpoint[F"{old_prefix}.proj_out.bias"].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def a__ ( lowerCAmelCase : str , lowerCAmelCase : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = torch.load(lowerCAmelCase , map_location="cpu" )
UpperCAmelCase__ : Optional[Any] = {}
UpperCAmelCase__ : Optional[int] = checkpoint["time_embed.0.weight"]
UpperCAmelCase__ : str = checkpoint["time_embed.0.bias"]
UpperCAmelCase__ : Optional[Any] = checkpoint["time_embed.2.weight"]
UpperCAmelCase__ : int = checkpoint["time_embed.2.bias"]
if unet_config["num_class_embeds"] is not None:
UpperCAmelCase__ : str = checkpoint["label_emb.weight"]
UpperCAmelCase__ : List[str] = checkpoint["input_blocks.0.0.weight"]
UpperCAmelCase__ : int = checkpoint["input_blocks.0.0.bias"]
UpperCAmelCase__ : Any = unet_config["down_block_types"]
UpperCAmelCase__ : List[Any] = unet_config["layers_per_block"]
UpperCAmelCase__ : str = unet_config["attention_head_dim"]
UpperCAmelCase__ : List[str] = unet_config["block_out_channels"]
UpperCAmelCase__ : Optional[Any] = 1
UpperCAmelCase__ : List[Any] = channels_list[0]
for i, layer_type in enumerate(lowerCAmelCase ):
UpperCAmelCase__ : Optional[Any] = channels_list[i]
UpperCAmelCase__ : Tuple = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(lowerCAmelCase ):
UpperCAmelCase__ : str = F"down_blocks.{i}.resnets.{j}"
UpperCAmelCase__ : List[str] = F"input_blocks.{current_layer}.0"
UpperCAmelCase__ : Tuple = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase__ : int = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , has_skip=lowerCAmelCase )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(lowerCAmelCase ):
UpperCAmelCase__ : str = F"down_blocks.{i}.resnets.{j}"
UpperCAmelCase__ : List[str] = F"input_blocks.{current_layer}.0"
UpperCAmelCase__ : Optional[int] = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase__ : Any = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , has_skip=lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = F"down_blocks.{i}.attentions.{j}"
UpperCAmelCase__ : int = F"input_blocks.{current_layer}.1"
UpperCAmelCase__ : Any = convert_attention(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
current_layer += 1
if i != len(lowerCAmelCase ) - 1:
UpperCAmelCase__ : Dict = F"down_blocks.{i}.downsamplers.0"
UpperCAmelCase__ : Any = F"input_blocks.{current_layer}.0"
UpperCAmelCase__ : str = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
current_layer += 1
UpperCAmelCase__ : Union[str, Any] = current_channels
# hardcoded the mid-block for now
UpperCAmelCase__ : Dict = "mid_block.resnets.0"
UpperCAmelCase__ : Any = "middle_block.0"
UpperCAmelCase__ : str = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : List[str] = "mid_block.attentions.0"
UpperCAmelCase__ : List[str] = "middle_block.1"
UpperCAmelCase__ : Optional[int] = convert_attention(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = "mid_block.resnets.1"
UpperCAmelCase__ : Optional[int] = "middle_block.2"
UpperCAmelCase__ : Optional[int] = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : str = 0
UpperCAmelCase__ : Tuple = unet_config["up_block_types"]
for i, layer_type in enumerate(lowerCAmelCase ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase__ : int = F"up_blocks.{i}.resnets.{j}"
UpperCAmelCase__ : int = F"output_blocks.{current_layer}.0"
UpperCAmelCase__ : Optional[Any] = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , has_skip=lowerCAmelCase )
current_layer += 1
if i != len(lowerCAmelCase ) - 1:
UpperCAmelCase__ : str = F"up_blocks.{i}.upsamplers.0"
UpperCAmelCase__ : Any = F"output_blocks.{current_layer-1}.1"
UpperCAmelCase__ : Optional[Any] = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase__ : Union[str, Any] = F"up_blocks.{i}.resnets.{j}"
UpperCAmelCase__ : Tuple = F"output_blocks.{current_layer}.0"
UpperCAmelCase__ : Optional[Any] = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , has_skip=lowerCAmelCase )
UpperCAmelCase__ : List[str] = F"up_blocks.{i}.attentions.{j}"
UpperCAmelCase__ : Dict = F"output_blocks.{current_layer}.1"
UpperCAmelCase__ : int = convert_attention(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
current_layer += 1
if i != len(lowerCAmelCase ) - 1:
UpperCAmelCase__ : int = F"up_blocks.{i}.upsamplers.0"
UpperCAmelCase__ : Tuple = F"output_blocks.{current_layer-1}.2"
UpperCAmelCase__ : int = convert_resnet(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = checkpoint["out.0.weight"]
UpperCAmelCase__ : Optional[int] = checkpoint["out.0.bias"]
UpperCAmelCase__ : Any = checkpoint["out.2.weight"]
UpperCAmelCase__ : str = checkpoint["out.2.bias"]
return new_checkpoint
if __name__ == "__main__":
A__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
A__ : List[str] = parser.parse_args()
A__ : Union[str, Any] = strabool(args.class_cond)
A__ : List[str] = os.path.basename(args.unet_path)
print(f"""Checkpoint: {ckpt_name}""")
# Get U-Net config
if "imagenet64" in ckpt_name:
A__ : Optional[Any] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
A__ : Any = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
A__ : str = TEST_UNET_CONFIG
else:
raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
if not args.class_cond:
A__ : Optional[int] = None
A__ : List[Any] = con_pt_to_diffuser(args.unet_path, unet_config)
A__ : str = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
A__ : Optional[Any] = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
A__ : List[Any] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
A__ : Any = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
A__ : List[Any] = CMStochasticIterativeScheduler(**scheduler_config)
A__ : Any = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
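# Illustrative invocation (a sketch; the script name and file paths below are
# placeholders, not taken from the original source). The checkpoint file name is
# chosen to contain "imagenet64" and "cd" so that the branches above select
# IMAGENET_64_UNET_CONFIG and CD_SCHEDULER_CONFIG:
#
#   python convert_consistency_to_diffusers.py \
#       --unet_path ./cd_imagenet64_l2.pt \
#       --dump_path ./consistency-model-imagenet64 \
#       --class_cond True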
| 660 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
A__ : Dict = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
if isinstance(lowerCAmelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowerCAmelCase , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowerCAmelCase ):
return [[videos]]
raise ValueError(F"Could not make batched video from {videos}" )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ['pixel_values']
def __init__( self , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = 1 / 2_55 , __UpperCamelCase = True , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> None:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
UpperCAmelCase__ : List[Any] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase__ : int = get_size_dict(__UpperCamelCase , param_name="crop_size" )
UpperCAmelCase__ : Dict = do_resize
UpperCAmelCase__ : Optional[int] = size
UpperCAmelCase__ : List[Any] = do_center_crop
UpperCAmelCase__ : str = crop_size
UpperCAmelCase__ : Optional[int] = resample
UpperCAmelCase__ : int = do_rescale
UpperCAmelCase__ : Union[str, Any] = rescale_factor
UpperCAmelCase__ : Union[str, Any] = offset
UpperCAmelCase__ : Dict = do_normalize
UpperCAmelCase__ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase__ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" in size:
UpperCAmelCase__ : Union[str, Any] = get_resize_output_image_size(__UpperCamelCase , size["shortest_edge"] , default_to_square=__UpperCamelCase )
elif "height" in size and "width" in size:
UpperCAmelCase__ : Any = (size["height"], size["width"])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[Any] = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(__UpperCamelCase , size=(size["height"], size["width"]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , )-> Tuple:
UpperCAmelCase__ : str = image.astype(np.floataa )
if offset:
UpperCAmelCase__ : Tuple = image - (scale / 2)
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , )-> np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase__ : Optional[Any] = to_numpy_array(__UpperCamelCase )
if do_resize:
UpperCAmelCase__ : Union[str, Any] = self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase )
if do_center_crop:
UpperCAmelCase__ : int = self.center_crop(__UpperCamelCase , size=__UpperCamelCase )
if do_rescale:
UpperCAmelCase__ : List[str] = self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase , offset=__UpperCamelCase )
if do_normalize:
UpperCAmelCase__ : List[Any] = self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase )
UpperCAmelCase__ : Dict = to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase )
return image
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , )-> PIL.Image.Image:
UpperCAmelCase__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : int = resample if resample is not None else self.resample
UpperCAmelCase__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : int = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : Optional[int] = offset if offset is not None else self.offset
UpperCAmelCase__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Dict = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : Optional[int] = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : List[str] = size if size is not None else self.size
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : Dict = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : Tuple = get_size_dict(__UpperCamelCase , param_name="crop_size" )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase__ : List[str] = make_batched(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = [
[
self._preprocess_image(
image=__UpperCamelCase , do_resize=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , do_center_crop=__UpperCamelCase , crop_size=__UpperCamelCase , do_rescale=__UpperCamelCase , rescale_factor=__UpperCamelCase , offset=__UpperCamelCase , do_normalize=__UpperCamelCase , image_mean=__UpperCamelCase , image_std=__UpperCamelCase , data_format=__UpperCamelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase__ : Dict = {"pixel_values": videos}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
| 660 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _lowercase ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = ShapEPipeline
_A = ['prompt']
_A = ['prompt']
_A = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
_A = False
@property
def lowerCAmelCase__ ( self )-> List[str]:
return 32
@property
def lowerCAmelCase__ ( self )-> str:
return 32
@property
def lowerCAmelCase__ ( self )-> Optional[Any]:
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self )-> Optional[Any]:
return 8
@property
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def lowerCAmelCase__ ( self )-> List[str]:
torch.manual_seed(0 )
UpperCAmelCase__ : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(__UpperCamelCase )
@property
def lowerCAmelCase__ ( self )-> Any:
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
UpperCAmelCase__ : int = PriorTransformer(**__UpperCamelCase )
return model
@property
def lowerCAmelCase__ ( self )-> Tuple:
torch.manual_seed(0 )
UpperCAmelCase__ : Tuple = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
UpperCAmelCase__ : Union[str, Any] = ShapERenderer(**__UpperCamelCase )
return model
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Optional[Any] = self.dummy_prior
UpperCAmelCase__ : Optional[Any] = self.dummy_text_encoder
UpperCAmelCase__ : List[Any] = self.dummy_tokenizer
UpperCAmelCase__ : Dict = self.dummy_renderer
UpperCAmelCase__ : Any = HeunDiscreteScheduler(
beta_schedule="exp" , num_train_timesteps=10_24 , prediction_type="sample" , use_karras_sigmas=__UpperCamelCase , clip_sample=__UpperCamelCase , clip_sample_range=1.0 , )
UpperCAmelCase__ : List[Any] = {
"prior": prior,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=0 )-> List[str]:
if str(__UpperCamelCase ).startswith("mps" ):
UpperCAmelCase__ : Union[str, Any] = torch.manual_seed(__UpperCamelCase )
else:
UpperCAmelCase__ : Dict = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = {
"prompt": "horse",
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : Optional[Any] = "cpu"
UpperCAmelCase__ : List[str] = self.get_dummy_components()
UpperCAmelCase__ : List[str] = self.pipeline_class(**__UpperCamelCase )
UpperCAmelCase__ : int = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : Any = pipe(**self.get_dummy_inputs(__UpperCamelCase ) )
UpperCAmelCase__ : int = output.images[0]
UpperCAmelCase__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
UpperCAmelCase__ : Any = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self )-> str:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : Union[str, Any] = torch_device == "cpu"
UpperCAmelCase__ : Dict = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__UpperCamelCase , relax_max_difference=__UpperCamelCase , )
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ : List[Any] = self.get_dummy_components()
UpperCAmelCase__ : Union[str, Any] = self.pipeline_class(**__UpperCamelCase )
UpperCAmelCase__ : Any = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : Tuple = 1
UpperCAmelCase__ : Tuple = 2
UpperCAmelCase__ : Tuple = self.get_dummy_inputs(__UpperCamelCase )
for key in inputs.keys():
if key in self.batch_params:
UpperCAmelCase__ : int = batch_size * [inputs[key]]
UpperCAmelCase__ : Optional[int] = pipe(**__UpperCamelCase , num_images_per_prompt=__UpperCamelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_np_out.npy" )
UpperCAmelCase__ : Optional[Any] = ShapEPipeline.from_pretrained("openai/shap-e" )
UpperCAmelCase__ : Optional[Any] = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : Tuple = torch.Generator(device=__UpperCamelCase ).manual_seed(0 )
UpperCAmelCase__ : Dict = pipe(
"a shark" , generator=__UpperCamelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__UpperCamelCase , __UpperCamelCase )
| 660 |
"""simple docstring"""
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
    if lowerCAmelCase < 0:
        raise ValueError("Input value must be a positive integer" )
    elif not isinstance(lowerCAmelCase , int ):
        raise TypeError("Input value must be a 'int' type" )
return bin(lowerCAmelCase ).count("1" )
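# --- illustrative sketch (added for clarity; not part of the original sample) ---
# A readably named version of the set-bit counter above; its doctests are picked up
# by the doctest run in the __main__ block below.
def _popcount_sketch(number: int) -> int:
    """Count the set bits of a non-negative integer.

    >>> _popcount_sketch(25)  # 25 == 0b11001
    3
    >>> _popcount_sketch(37)  # 37 == 0b100101
    3
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(number).count("1")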
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 1 |
"""simple docstring"""
def a__ ( lowerCAmelCase : int , lowerCAmelCase : Dict ):
'''simple docstring'''
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def a__ ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any]=0 ):
'''simple docstring'''
return sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x[column] )
def a__ ( lowerCAmelCase : int , lowerCAmelCase : List[Any] , lowerCAmelCase : Any=float("inf" ) ):
'''simple docstring'''
for i in range(points_counts - 1 ):
for j in range(i + 1 , lowerCAmelCase ):
UpperCAmelCase__ : Tuple = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
UpperCAmelCase__ : List[str] = current_dis
return min_dis
def a__ ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any]=float("inf" ) ):
'''simple docstring'''
for i in range(min(6 , points_counts - 1 ) , lowerCAmelCase ):
for j in range(max(0 , i - 6 ) , lowerCAmelCase ):
UpperCAmelCase__ : List[str] = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
UpperCAmelCase__ : List[str] = current_dis
return min_dis
def a__ ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
# base case
if points_counts <= 3:
return dis_between_closest_pair(lowerCAmelCase , lowerCAmelCase )
# recursion
UpperCAmelCase__ : List[Any] = points_counts // 2
UpperCAmelCase__ : str = closest_pair_of_points_sqr(
lowerCAmelCase , points_sorted_on_y[:mid] , lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = closest_pair_of_points_sqr(
lowerCAmelCase , points_sorted_on_y[mid:] , points_counts - mid )
UpperCAmelCase__ : List[str] = min(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(lowerCAmelCase )
UpperCAmelCase__ : Tuple = dis_between_closest_in_strip(
lowerCAmelCase , len(lowerCAmelCase ) , lowerCAmelCase )
return min(lowerCAmelCase , lowerCAmelCase )
def a__ ( lowerCAmelCase : Any , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase__ : Any = column_based_sort(lowerCAmelCase , column=0 )
UpperCAmelCase__ : Dict = column_based_sort(lowerCAmelCase , column=1 )
return (
closest_pair_of_points_sqr(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
) ** 0.5
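# --- illustrative sketch (added for clarity; not part of the original sample) ---
# Brute-force O(n^2) reference for the divide-and-conquer routine above, intended
# only for sanity-checking small inputs. The name is hypothetical.
def _closest_pair_bruteforce(points):
    best = float("inf")
    for i in range(len(points) - 1):
        for j in range(i + 1, len(points)):
            dx = points[i][0] - points[j][0]
            dy = points[i][1] - points[j][1]
            best = min(best, dx * dx + dy * dy)
    return best**0.5
# For the sample points used in the __main__ block below, the closest pair is
# (2, 3) and (3, 4), so the reference distance is sqrt(2) ~ 1.4142.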
if __name__ == "__main__":
A__ : List[str] = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("""Distance:""", closest_pair_of_points(points, len(points)))
| 660 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
A__ : Optional[Any] = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : bool , lowerCAmelCase : bool ):
'''simple docstring'''
def run_func(lowerCAmelCase : Dict ):
@wraps(lowerCAmelCase )
def run_in_eager_mode(*lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Dict ):
return func(*lowerCAmelCase , **lowerCAmelCase )
@wraps(lowerCAmelCase )
@tf.function(experimental_compile=lowerCAmelCase )
def run_in_graph_mode(*lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[Any] ):
return func(*lowerCAmelCase , **lowerCAmelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def a__ ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = random.Random()
UpperCAmelCase__ : List[str] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(lowerCAmelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = 42
_A = "TensorFlow"
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return tf.__version__
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
# initialize GPU on separate process
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Union[str, Any] = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
UpperCAmelCase__ : List[Any] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : List[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : List[str] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Any = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Optional[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : Union[str, Any] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Optional[int] = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : str = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : Any = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : List[Any] = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Dict = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : Any = TF_MODEL_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : int = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Optional[Any] = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , training=__UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__UpperCamelCase , training=__UpperCamelCase )
UpperCAmelCase__ : Dict = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : List[Any] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Any = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : Any = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : int = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : int = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : List[str] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : Union[str, Any] = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Dict = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Union[str, Any] = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Any = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
UpperCAmelCase__ : str = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCAmelCase__ ( self , __UpperCamelCase )-> float:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(__UpperCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCAmelCase__ : Optional[Any] = timeit.repeat(
__UpperCamelCase , repeat=self.args.repeat , number=10 , )
return min(__UpperCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> [Memory, MemorySummary]:
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
UpperCAmelCase__ : List[str] = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
UpperCAmelCase__ : Optional[int] = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
UpperCAmelCase__ : Any = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
UpperCAmelCase__ : Optional[int] = nvml.nvmlDeviceGetMemoryInfo(__UpperCamelCase )
UpperCAmelCase__ : str = meminfo.used
UpperCAmelCase__ : int = Memory(__UpperCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
UpperCAmelCase__ : Any = None
else:
UpperCAmelCase__ : List[Any] = measure_peak_memory_cpu(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = Memory(__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCAmelCase__ : Optional[Any] = stop_memory_tracing(__UpperCamelCase )
if memory is None:
UpperCAmelCase__ : Tuple = summary.total
else:
UpperCAmelCase__ : int = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
return "N/A", None
| 660 | 1 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def a__ ( lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] ):
'''simple docstring'''
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[int] = np.full((len(lowerCAmelCase ), sequence_length, 2) , lowerCAmelCase )
else:
UpperCAmelCase__ : Optional[Any] = np.full((len(lowerCAmelCase ), sequence_length) , lowerCAmelCase )
for i, tensor in enumerate(lowerCAmelCase ):
if padding_side == "right":
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Dict = tensor[:sequence_length]
else:
UpperCAmelCase__ : Tuple = tensor[:sequence_length]
else:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[Any] = tensor[:sequence_length]
else:
UpperCAmelCase__ : int = tensor[:sequence_length]
return out_tensor.tolist()
def a__ ( lowerCAmelCase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = ord(lowerCAmelCase )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
UpperCAmelCase__ : Optional[Any] = unicodedata.category(lowerCAmelCase )
if cat.startswith("P" ):
return True
return False
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = True
_A = None
_A = None
_A = -100
_A = "pt"
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
import torch
UpperCAmelCase__ : Optional[Any] = "label" if "label" in features[0].keys() else "labels"
UpperCAmelCase__ : Dict = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
UpperCAmelCase__ : str = self.tokenizer.pad(
__UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
if labels is None:
return batch
UpperCAmelCase__ : Optional[Any] = torch.tensor(batch["entity_ids"] ).shape[1]
UpperCAmelCase__ : int = self.tokenizer.padding_side
if padding_side == "right":
UpperCAmelCase__ : int = [
list(__UpperCamelCase ) + [self.label_pad_token_id] * (sequence_length - len(__UpperCamelCase )) for label in labels
]
else:
UpperCAmelCase__ : List[Any] = [
[self.label_pad_token_id] * (sequence_length - len(__UpperCamelCase )) + list(__UpperCamelCase ) for label in labels
]
UpperCAmelCase__ : Optional[Any] = [feature["ner_tags"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , -1 , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = [feature["original_entity_spans"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , (-1, -1) , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = {k: torch.tensor(__UpperCamelCase , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 660 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
A__ : List[str] = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
A__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 660 | 1 |
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
A__ : str = logging.get_logger(__name__)
A__ : Any = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , *__UpperCamelCase , **__UpperCamelCase )-> Optional[int]:
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
if config is None:
assert isinstance(self.model , __UpperCamelCase ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F" {self.model.__class__}"
)
UpperCAmelCase__ : Optional[int] = self.model.config
else:
UpperCAmelCase__ : List[Any] = config
UpperCAmelCase__ : Tuple = data_args
UpperCAmelCase__ : List[str] = self.config.tgt_vocab_size if isinstance(self.config , __UpperCamelCase ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
" padding.." )
if self.args.label_smoothing == 0:
UpperCAmelCase__ : List[Any] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
UpperCAmelCase__ : str = label_smoothed_nll_loss
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Tuple:
if self.optimizer is None:
UpperCAmelCase__ : List[Any] = ["bias", "LayerNorm.weight"]
UpperCAmelCase__ : Any = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
UpperCAmelCase__ : Dict = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
UpperCAmelCase__ : int = Adafactor
UpperCAmelCase__ : Optional[int] = {"scale_parameter": False, "relative_step": False}
else:
UpperCAmelCase__ : Tuple = AdamW
UpperCAmelCase__ : Any = {
"betas": (self.args.adam_betaa, self.args.adam_betaa),
"eps": self.args.adam_epsilon,
}
UpperCAmelCase__ : Dict = self.args.learning_rate
if self.sharded_ddp:
UpperCAmelCase__ : Optional[int] = OSS(
params=__UpperCamelCase , optim=__UpperCamelCase , **__UpperCamelCase , )
else:
UpperCAmelCase__ : Dict = optimizer_cls(__UpperCamelCase , **__UpperCamelCase )
if self.lr_scheduler is None:
UpperCAmelCase__ : Dict = self._get_lr_scheduler(__UpperCamelCase )
else: # ignoring --lr_scheduler
logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Dict:
UpperCAmelCase__ : Union[str, Any] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
UpperCAmelCase__ : Dict = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
UpperCAmelCase__ : Tuple = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
UpperCAmelCase__ : Optional[Any] = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__UpperCamelCase )
return scheduler
def lowerCAmelCase__ ( self )-> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[Any]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
UpperCAmelCase__ : List[Any] = model(**__UpperCamelCase , use_cache=__UpperCamelCase )[0]
UpperCAmelCase__ : Any = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = model(**__UpperCamelCase , labels=__UpperCamelCase , use_cache=__UpperCamelCase )[:2]
else:
# compute label smoothed loss
UpperCAmelCase__ : Tuple = model(**__UpperCamelCase , use_cache=__UpperCamelCase )[0]
UpperCAmelCase__ : Tuple = torch.nn.functional.log_softmax(__UpperCamelCase , dim=-1 )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.loss_fn(__UpperCamelCase , __UpperCamelCase , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> int:
UpperCAmelCase__ : Optional[int] = inputs.pop("labels" )
UpperCAmelCase__ , UpperCAmelCase__ : int = self._compute_loss(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return loss
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , )-> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
UpperCAmelCase__ : Union[str, Any] = self._prepare_inputs(__UpperCamelCase )
UpperCAmelCase__ : Tuple = {
"max_length": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
UpperCAmelCase__ : int = self.model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **__UpperCamelCase , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
UpperCAmelCase__ : Dict = self._pad_tensors_to_max_len(__UpperCamelCase , gen_kwargs["max_length"] )
UpperCAmelCase__ : List[Any] = inputs.pop("labels" )
with torch.no_grad():
# compute loss on predict data
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self._compute_loss(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
UpperCAmelCase__ : List[Any] = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
UpperCAmelCase__ : Optional[int] = self._pad_tensors_to_max_len(__UpperCamelCase , gen_kwargs["max_length"] )
return (loss, logits, labels)
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> str:
# If PAD token is not defined at least EOS token has to be defined
UpperCAmelCase__ : Optional[int] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
F" padded to `max_length`={max_length}" )
UpperCAmelCase__ : int = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
UpperCAmelCase__ : str = tensor
return padded_tensor
| 660 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self , __UpperCamelCase = 7_68 , )-> Union[str, Any]:
super().__init__()
UpperCAmelCase__ : str = nn.Parameter(torch.zeros(1 , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[int] = nn.Parameter(torch.ones(1 , __UpperCamelCase ) )
def lowerCAmelCase__ ( self , __UpperCamelCase = None , __UpperCamelCase = None , )-> Any:
UpperCAmelCase__ : Dict = nn.Parameter(self.mean.to(__UpperCamelCase ).to(__UpperCamelCase ) )
UpperCAmelCase__ : Any = nn.Parameter(self.std.to(__UpperCamelCase ).to(__UpperCamelCase ) )
return self
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Dict = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[Any]:
UpperCAmelCase__ : Any = (embeds * self.std) + self.mean
return embeds
| 660 | 1 |
"""simple docstring"""
def a__ ( lowerCAmelCase : List[str] ):
'''simple docstring'''
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def a__ ( lowerCAmelCase : dict[int, list[int]] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Dict = len(lowerCAmelCase ) # No of vertices in graph
UpperCAmelCase__ : str = [0] * n
UpperCAmelCase__ : Optional[Any] = [False] * n
def dfs(lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : str , lowerCAmelCase : List[str] ):
UpperCAmelCase__ : int = True
UpperCAmelCase__ : List[Any] = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , id_ )
UpperCAmelCase__ : Any = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
UpperCAmelCase__ : int = min(low[at] , low[to] )
UpperCAmelCase__ : list[tuple[int, int]] = []
for i in range(lowerCAmelCase ):
if not visited[i]:
dfs(lowerCAmelCase , -1 , lowerCAmelCase , id_ )
return bridges
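# --- illustrative sketch (added for clarity; not part of the original sample) ---
# Brute-force cross-check for the low-link bridge finder above: an edge is a bridge
# exactly when removing it disconnects its endpoints. Quadratic in the edge count,
# so only useful on tiny graphs. Names are hypothetical.
def _bridges_bruteforce(graph):
    def reachable(start, banned):
        seen, stack = {start}, [start]
        while stack:
            node = stack.pop()
            for nxt in graph[node]:
                if (node, nxt) != banned and (nxt, node) != banned and nxt not in seen:
                    seen.add(nxt)
                    stack.append(nxt)
        return seen

    found = []
    for u in graph:
        for v in graph[u]:
            if u < v and v not in reachable(u, (u, v)):
                found.append((u, v))
    return found
# Example: in the path graph {0: [1], 1: [0, 2], 2: [1]} every edge is a bridge,
# so _bridges_bruteforce returns [(0, 1), (1, 2)].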
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : List[Any] ):
'''simple docstring'''
# Construct model
if gpta_config_file == "":
UpperCAmelCase__ : Optional[int] = GPTaConfig()
else:
UpperCAmelCase__ : Dict = GPTaConfig.from_json_file(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = GPTaModel(lowerCAmelCase )
# Load weights from numpy
load_tf_weights_in_gpta(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save pytorch-model
UpperCAmelCase__ : Optional[int] = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
UpperCAmelCase__ : Any = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
torch.save(model.state_dict() , lowerCAmelCase )
print(F"Save configuration file to {pytorch_config_dump_path}" )
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
A__ : Optional[Any] = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
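# Illustrative invocation (a sketch; the script name and paths are placeholders,
# not taken from the original source). --gpt2_config_file may be omitted, in which
# case the conversion falls back to a default GPTaConfig() as handled above:
#
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path ./gpt2-tf/model.ckpt \
#       --pytorch_dump_folder_path ./gpt2-pytorch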
| 660 | 1 |
"""simple docstring"""
import requests
A__ : Optional[int] = """YOUR API KEY"""
def a__ ( lowerCAmelCase : str , lowerCAmelCase : str = giphy_api_key ):
'''simple docstring'''
UpperCAmelCase__ : str = "+".join(query.split() )
UpperCAmelCase__ : Dict = F"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
UpperCAmelCase__ : List[Any] = requests.get(lowerCAmelCase ).json()["data"]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("""\n""".join(get_gifs("""space ship""")))
| 660 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
A__ : Optional[int] = ["""small""", """medium""", """large"""]
A__ : Optional[int] = """lm_head.decoder.weight"""
A__ : Dict = """lm_head.weight"""
def a__ ( lowerCAmelCase : str , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = torch.load(lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = d.pop(lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
torch.save(lowerCAmelCase , os.path.join(lowerCAmelCase , lowerCAmelCase ) )
if __name__ == "__main__":
A__ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
A__ : Tuple = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
A__ : Tuple = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
A__ : str = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 660 | 1 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def a__ ( lowerCAmelCase : List[str] ):
'''simple docstring'''
def wrapper(*lowerCAmelCase : Any , **lowerCAmelCase : Tuple ):
UpperCAmelCase__ : Optional[int] = timeit.default_timer()
UpperCAmelCase__ : int = func(*lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase__ : List[Any] = timeit.default_timer() - starttime
return delta
UpperCAmelCase__ : int = func.__name__
return wrapper
def a__ ( lowerCAmelCase : dict , lowerCAmelCase : Optional[int]=100 , lowerCAmelCase : List[str]=None ):
'''simple docstring'''
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Optional[Any] = seq_shapes or {}
for i in range(lowerCAmelCase ):
UpperCAmelCase__ : int = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowerCAmelCase , _ArrayXD ):
UpperCAmelCase__ : List[str] = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowerCAmelCase , datasets.Value ):
if v.dtype == "string":
UpperCAmelCase__ : Dict = "The small grey turtle was surprisingly fast when challenged."
else:
UpperCAmelCase__ : str = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(lowerCAmelCase , datasets.Sequence ):
while isinstance(lowerCAmelCase , datasets.Sequence ):
UpperCAmelCase__ : List[str] = v.feature
UpperCAmelCase__ : Optional[int] = seq_shapes[k]
UpperCAmelCase__ : Optional[int] = np.random.rand(*lowerCAmelCase ).astype(v.dtype )
UpperCAmelCase__ : Union[str, Any] = data
dummy_data.append((i, example) )
return dummy_data
def a__ ( lowerCAmelCase : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : List[str]=100 , lowerCAmelCase : Optional[int]=None ):
'''simple docstring'''
UpperCAmelCase__ : int = generate_examples(lowerCAmelCase , num_examples=lowerCAmelCase , seq_shapes=lowerCAmelCase )
with ArrowWriter(features=lowerCAmelCase , path=lowerCAmelCase ) as writer:
for key, record in dummy_data:
UpperCAmelCase__ : List[Any] = features.encode_example(lowerCAmelCase )
writer.write(lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
F"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
UpperCAmelCase__ : Optional[int] = datasets.Dataset.from_file(filename=lowerCAmelCase , info=datasets.DatasetInfo(features=lowerCAmelCase ) )
return dataset
| 660 |
"""simple docstring"""
from math import isqrt
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : List[Any] = False
return [i for i in range(2 , lowerCAmelCase ) if is_prime[i]]
def a__ ( lowerCAmelCase : int = 10**8 ):
'''simple docstring'''
UpperCAmelCase__ : Dict = calculate_prime_numbers(max_number // 2 )
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Tuple = len(lowerCAmelCase ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
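# --- illustrative sketch (added for clarity; not part of the original sample) ---
# Brute-force cross-check of the two-pointer count above for small limits: a number
# qualifies when its smallest prime factor p leaves a prime cofactor n // p. Names
# are hypothetical; do not use this for the 10**8 default.
def _semiprimes_below_bruteforce(max_number):
    def is_prime(n):
        return n > 1 and all(n % d for d in range(2, int(n**0.5) + 1))

    count = 0
    for n in range(4, max_number):
        for p in range(2, int(n**0.5) + 1):
            if n % p == 0:
                if is_prime(p) and is_prime(n // p):
                    count += 1
                break
    return count
# Example: the semiprimes below 30 are 4, 6, 9, 10, 14, 15, 21, 22, 25, 26,
# so _semiprimes_below_bruteforce(30) == 10.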
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 | 1 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A__ : Optional[Any] = logging.get_logger(__name__)
A__ : List[Any] = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
A__ : Optional[Any] = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
A__ : int = {"""facebook/blenderbot-3B""": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
UpperCAmelCase__ : List[str] = bs[:]
UpperCAmelCase__ : Union[str, Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCAmelCase )
cs.append(2**8 + n )
n += 1
UpperCAmelCase__ : Optional[Any] = [chr(lowerCAmelCase ) for n in cs]
return dict(zip(lowerCAmelCase , lowerCAmelCase ) )
def a__ ( lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = set()
UpperCAmelCase__ : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase__ : List[Any] = char
return pairs
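# Worked example (added for clarity): for the symbol tuple ("h", "e", "l", "l", "o"),
# the pair-extraction helper above returns {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")},
# i.e. every adjacent symbol pair seen once, which is what the BPE merge loop below consumes.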
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = ['input_ids', 'attention_mask']
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="replace" , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase="<mask>" , __UpperCamelCase=False , **__UpperCamelCase , )-> Optional[Any]:
UpperCAmelCase__ : Optional[int] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else bos_token
UpperCAmelCase__ : Optional[Any] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else eos_token
UpperCAmelCase__ : int = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else sep_token
UpperCAmelCase__ : Optional[int] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else cls_token
UpperCAmelCase__ : Tuple = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else unk_token
UpperCAmelCase__ : str = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase__ : Union[str, Any] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
super().__init__(
errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , **__UpperCamelCase , )
with open(__UpperCamelCase , encoding="utf-8" ) as vocab_handle:
UpperCAmelCase__ : Dict = json.load(__UpperCamelCase )
UpperCAmelCase__ : Any = {v: k for k, v in self.encoder.items()}
UpperCAmelCase__ : Optional[int] = errors # how to handle errors in decoding
UpperCAmelCase__ : Dict = bytes_to_unicode()
UpperCAmelCase__ : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(__UpperCamelCase , encoding="utf-8" ) as merges_handle:
UpperCAmelCase__ : str = merges_handle.read().split("\n" )[1:-1]
UpperCAmelCase__ : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase__ : str = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
UpperCAmelCase__ : int = {}
UpperCAmelCase__ : str = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase__ : List[Any] = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCAmelCase__ ( self )-> Optional[Any]:
return len(self.encoder )
def lowerCAmelCase__ ( self )-> Optional[int]:
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Tuple:
if token in self.cache:
return self.cache[token]
UpperCAmelCase__ : Optional[Any] = tuple(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = get_pairs(__UpperCamelCase )
if not pairs:
return token
while True:
UpperCAmelCase__ : str = min(__UpperCamelCase , key=lambda __UpperCamelCase : self.bpe_ranks.get(__UpperCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = bigram
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : List[str] = 0
while i < len(__UpperCamelCase ):
try:
UpperCAmelCase__ : Dict = word.index(__UpperCamelCase , __UpperCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase__ : Tuple = j
if word[i] == first and i < len(__UpperCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase__ : List[Any] = tuple(__UpperCamelCase )
UpperCAmelCase__ : int = new_word
if len(__UpperCamelCase ) == 1:
break
else:
UpperCAmelCase__ : Any = get_pairs(__UpperCamelCase )
UpperCAmelCase__ : Dict = " ".join(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = word
return word
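    # Illustrative (assumed) walk-through of the merge loop above: starting from the character
    # tuple ("h", "e", "l", "l", "o"), a rank table containing only ("l", "l") fuses that pair
    # into "ll"; merging stops once no remaining pair appears in bpe_ranks, and the space-joined
    # result "h e ll o" is cached and returned.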
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Any:
UpperCAmelCase__ : Any = []
for token in re.findall(self.pat , __UpperCamelCase ):
UpperCAmelCase__ : Dict = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__UpperCamelCase ).split(" " ) )
return bpe_tokens
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
return self.encoder.get(__UpperCamelCase , self.encoder.get(self.unk_token ) )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Tuple:
return self.decoder.get(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Optional[Any]:
UpperCAmelCase__ : List[str] = "".join(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None )-> Tuple[str]:
if not os.path.isdir(__UpperCamelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase__ : Tuple = os.path.join(
__UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase__ : List[str] = os.path.join(
__UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCamelCase , ensure_ascii=__UpperCamelCase ) + "\n" )
UpperCAmelCase__ : Dict = 0
with open(__UpperCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
UpperCAmelCase__ : str = token_index
writer.write(" ".join(__UpperCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False )-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCamelCase )) + [1]
return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase )) + [1]
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None )-> List[int]:
UpperCAmelCase__ : Optional[int] = [self.sep_token_id]
UpperCAmelCase__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def lowerCAmelCase__ ( self , text , is_split_into_words=False , **kwargs )-> Optional[int]:
        add_prefix_space = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None )-> List[Any]:
return token_ids_a + [self.eos_token_id]
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[int]:
UpperCAmelCase__ : Union[str, Any] = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix a space, as is done inside Blenderbot
inputs.append(" " + text )
else:
                # Generated responses should already contain the prefix space.
inputs.append(__UpperCamelCase )
UpperCAmelCase__ : Any = " ".join(__UpperCamelCase )
UpperCAmelCase__ : int = self.encode(__UpperCamelCase )
if len(__UpperCamelCase ) > self.model_max_length:
UpperCAmelCase__ : List[str] = input_ids[-self.model_max_length :]
logger.warning(F"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
return input_ids
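# Rough sketch of the conversation helper above (hypothetical usage): each user turn is
# prepended with a single space, all turns are joined with " ", encoded, and the resulting ids
# are trimmed from the left whenever they exceed the tokenizer's model_max_length.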
| 660 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def a__ ( lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] ):
'''simple docstring'''
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[int] = np.full((len(lowerCAmelCase ), sequence_length, 2) , lowerCAmelCase )
else:
UpperCAmelCase__ : Optional[Any] = np.full((len(lowerCAmelCase ), sequence_length) , lowerCAmelCase )
for i, tensor in enumerate(lowerCAmelCase ):
if padding_side == "right":
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Dict = tensor[:sequence_length]
else:
UpperCAmelCase__ : Tuple = tensor[:sequence_length]
else:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[Any] = tensor[:sequence_length]
else:
UpperCAmelCase__ : int = tensor[:sequence_length]
return out_tensor.tolist()
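# The helper above appears intended to pad (or truncate) every sequence in the batch to
# `sequence_length`, filling with the given padding value and aligning entries according to
# `padding_side`, before returning plain Python lists (a hedged reading of the code, not a spec).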
def a__ ( lowerCAmelCase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = ord(lowerCAmelCase )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
UpperCAmelCase__ : Optional[Any] = unicodedata.category(lowerCAmelCase )
if cat.startswith("P" ):
return True
return False
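# For example, the check above returns True for ASCII symbols such as "!", "/" and "}" via the
# code-point ranges, and for any other character whose Unicode category starts with "P"
# (e.g. "。"), while letters and digits fall through to False.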
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = True
_A = None
_A = None
_A = -100
_A = "pt"
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
import torch
UpperCAmelCase__ : Optional[Any] = "label" if "label" in features[0].keys() else "labels"
UpperCAmelCase__ : Dict = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
UpperCAmelCase__ : str = self.tokenizer.pad(
__UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
if labels is None:
return batch
UpperCAmelCase__ : Optional[Any] = torch.tensor(batch["entity_ids"] ).shape[1]
UpperCAmelCase__ : int = self.tokenizer.padding_side
if padding_side == "right":
UpperCAmelCase__ : int = [
list(__UpperCamelCase ) + [self.label_pad_token_id] * (sequence_length - len(__UpperCamelCase )) for label in labels
]
else:
UpperCAmelCase__ : List[Any] = [
[self.label_pad_token_id] * (sequence_length - len(__UpperCamelCase )) + list(__UpperCamelCase ) for label in labels
]
UpperCAmelCase__ : Optional[Any] = [feature["ner_tags"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , -1 , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = [feature["original_entity_spans"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , (-1, -1) , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = {k: torch.tensor(__UpperCamelCase , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 660 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase )-> str:
UpperCAmelCase__ : list[dict] = []
self.adlist.append(
{"value": "", "next_states": [], "fail_state": 0, "output": []} )
for keyword in keywords:
self.add_keyword(__UpperCamelCase )
self.set_fail_transitions()
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> int | None:
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def lowerCAmelCase__ ( self , __UpperCamelCase )-> None:
UpperCAmelCase__ : Optional[Any] = 0
for character in keyword:
UpperCAmelCase__ : Tuple = self.find_next_state(__UpperCamelCase , __UpperCamelCase )
if next_state is None:
self.adlist.append(
{
"value": character,
"next_states": [],
"fail_state": 0,
"output": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
UpperCAmelCase__ : List[Any] = len(self.adlist ) - 1
else:
UpperCAmelCase__ : List[Any] = next_state
self.adlist[current_state]["output"].append(__UpperCamelCase )
def lowerCAmelCase__ ( self )-> None:
UpperCAmelCase__ : deque = deque()
for node in self.adlist[0]["next_states"]:
q.append(__UpperCamelCase )
UpperCAmelCase__ : Dict = 0
while q:
UpperCAmelCase__ : Union[str, Any] = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = self.adlist[r]["fail_state"]
while (
self.find_next_state(__UpperCamelCase , self.adlist[child]["value"] ) is None
and state != 0
):
UpperCAmelCase__ : Tuple = self.adlist[state]["fail_state"]
UpperCAmelCase__ : Optional[Any] = self.find_next_state(
__UpperCamelCase , self.adlist[child]["value"] )
if self.adlist[child]["fail_state"] is None:
UpperCAmelCase__ : Any = 0
UpperCAmelCase__ : Any = (
self.adlist[child]["output"]
+ self.adlist[self.adlist[child]["fail_state"]]["output"]
)
def lowerCAmelCase__ ( self , __UpperCamelCase )-> dict[str, list[int]]:
UpperCAmelCase__ : dict = {} # returns a dict with keywords and list of its occurrences
UpperCAmelCase__ : int = 0
for i in range(len(__UpperCamelCase ) ):
while (
self.find_next_state(__UpperCamelCase , string[i] ) is None
and current_state != 0
):
UpperCAmelCase__ : Dict = self.adlist[current_state]["fail_state"]
UpperCAmelCase__ : Optional[Any] = self.find_next_state(__UpperCamelCase , string[i] )
if next_state is None:
UpperCAmelCase__ : List[Any] = 0
else:
UpperCAmelCase__ : Optional[Any] = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
UpperCAmelCase__ : Any = []
                result[key].append(i - len(key ) + 1 )
return result
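# A small usage sketch of the class above (expected behaviour, assuming the usual Aho-Corasick
# semantics): building it with the keywords ["he", "she"] and searching the text "ushers" should
# report start offsets {"she": [1], "he": [2]}, each recorded as i - len(keyword) + 1.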
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def a__ ( func : List[str] ):
    '''simple docstring'''
    def wrapper(*args : Any , **kwargs : Tuple ):
        starttime = timeit.default_timer()
        UpperCAmelCase__ : int = func(*args , **kwargs )
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper
def a__ ( lowerCAmelCase : dict , lowerCAmelCase : Optional[int]=100 , lowerCAmelCase : List[str]=None ):
'''simple docstring'''
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Optional[Any] = seq_shapes or {}
for i in range(lowerCAmelCase ):
UpperCAmelCase__ : int = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowerCAmelCase , _ArrayXD ):
UpperCAmelCase__ : List[str] = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowerCAmelCase , datasets.Value ):
if v.dtype == "string":
UpperCAmelCase__ : Dict = "The small grey turtle was surprisingly fast when challenged."
else:
UpperCAmelCase__ : str = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(lowerCAmelCase , datasets.Sequence ):
while isinstance(lowerCAmelCase , datasets.Sequence ):
UpperCAmelCase__ : List[str] = v.feature
UpperCAmelCase__ : Optional[int] = seq_shapes[k]
UpperCAmelCase__ : Optional[int] = np.random.rand(*lowerCAmelCase ).astype(v.dtype )
UpperCAmelCase__ : Union[str, Any] = data
dummy_data.append((i, example) )
return dummy_data
def a__ ( lowerCAmelCase : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : List[str]=100 , lowerCAmelCase : Optional[int]=None ):
'''simple docstring'''
UpperCAmelCase__ : int = generate_examples(lowerCAmelCase , num_examples=lowerCAmelCase , seq_shapes=lowerCAmelCase )
with ArrowWriter(features=lowerCAmelCase , path=lowerCAmelCase ) as writer:
for key, record in dummy_data:
UpperCAmelCase__ : List[Any] = features.encode_example(lowerCAmelCase )
writer.write(lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
F"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
UpperCAmelCase__ : Optional[int] = datasets.Dataset.from_file(filename=lowerCAmelCase , info=datasets.DatasetInfo(features=lowerCAmelCase ) )
return dataset
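# Hedged usage sketch of the helpers above (names assumed): given a features dict such as
# {"text": datasets.Value("string")} and num_examples=100, the dataset-writing helper generates
# 100 dummy rows, writes them to an Arrow file and reloads them as a datasets.Dataset, which the
# timing decorator defined at the top of the module can then benchmark.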
| 660 | 1 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
A__ : Tuple = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = Github(os.environ["GITHUB_TOKEN"] )
UpperCAmelCase__ : List[str] = g.get_repo("huggingface/diffusers" )
UpperCAmelCase__ : Tuple = repo.get_issues(state="open" )
for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
| 660 |
"""simple docstring"""
from manim import *
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : str = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase__ : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Union[str, Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[Any] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : int = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = VGroup(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("CPU" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(4 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Union[str, Any] = Text("GPU" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Model" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
rect.set_stroke(__UpperCamelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCAmelCase__ : int = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__UpperCamelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__UpperCamelCase , buff=0.0 )
self.add(__UpperCamelCase )
cpu_targs.append(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Any = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Loaded Checkpoint" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , aligned_edge=__UpperCamelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase__ : Any = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : str = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__UpperCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCAmelCase__ : Optional[Any] = MarkupText(
F"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCamelCase ) , Write(__UpperCamelCase ) )
self.play(Write(__UpperCamelCase , run_time=1 ) , Create(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = fill.copy().set_fill(__UpperCamelCase , opacity=0.7 )
target.move_to(__UpperCamelCase )
first_animations.append(GrowFromCenter(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : List[str] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__UpperCamelCase , run_time=1.5 ) )
self.play(*__UpperCamelCase )
self.play(*__UpperCamelCase )
self.wait()
| 660 | 1 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : str = logging.get_logger(__name__)
A__ : str = {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'segformer'
def __init__( self , __UpperCamelCase=3 , __UpperCamelCase=4 , __UpperCamelCase=[2, 2, 2, 2] , __UpperCamelCase=[8, 4, 2, 1] , __UpperCamelCase=[32, 64, 1_60, 2_56] , __UpperCamelCase=[7, 3, 3, 3] , __UpperCamelCase=[4, 2, 2, 2] , __UpperCamelCase=[1, 2, 5, 8] , __UpperCamelCase=[4, 4, 4, 4] , __UpperCamelCase="gelu" , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.1 , __UpperCamelCase=0.02 , __UpperCamelCase=0.1 , __UpperCamelCase=1E-6 , __UpperCamelCase=2_56 , __UpperCamelCase=2_55 , **__UpperCamelCase , )-> Optional[int]:
super().__init__(**__UpperCamelCase )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
" removed, as the behaviour will default to that of reshape_last_stage = True." , __UpperCamelCase , )
UpperCAmelCase__ : Optional[int] = num_channels
UpperCAmelCase__ : Union[str, Any] = num_encoder_blocks
UpperCAmelCase__ : Tuple = depths
UpperCAmelCase__ : List[str] = sr_ratios
UpperCAmelCase__ : Union[str, Any] = hidden_sizes
UpperCAmelCase__ : Any = patch_sizes
UpperCAmelCase__ : List[Any] = strides
UpperCAmelCase__ : Optional[int] = mlp_ratios
UpperCAmelCase__ : Tuple = num_attention_heads
UpperCAmelCase__ : Any = hidden_act
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Any = classifier_dropout_prob
UpperCAmelCase__ : Dict = initializer_range
UpperCAmelCase__ : Tuple = drop_path_rate
UpperCAmelCase__ : Optional[int] = layer_norm_eps
UpperCAmelCase__ : List[Any] = decoder_hidden_size
UpperCAmelCase__ : int = kwargs.get("reshape_last_stage" , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = semantic_loss_ignore_index
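# The constructor defaults above (depths [2, 2, 2, 2], hidden sizes [32, 64, 160, 256],
# decoder_hidden_size 256) appear to match the smallest MiT-b0 SegFormer variant; larger
# variants would override them (an observation, not verified here).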
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = version.parse('1.11' )
@property
def lowerCAmelCase__ ( self )-> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowerCAmelCase__ ( self )-> float:
return 1E-4
@property
def lowerCAmelCase__ ( self )-> int:
return 12
| 660 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
A__ : Tuple = logging.get_logger(__name__)
def a__ ( src_layers : nn.ModuleList , dest_layers : nn.ModuleList , layers_to_copy : List[int] ):
    '''simple docstring'''
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
    assert len(dest_layers ) == len(layers_to_copy ), F"{len(dest_layers )} != {len(layers_to_copy )}"
    dest_layers.load_state_dict(layers_to_copy.state_dict() )
A__ : List[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
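# Reading the table above: distilling a 12-layer teacher into a 3-layer student copies teacher
# layers 0, 6 and 11, while shrinking a 16-layer teacher to 4 layers keeps layers 0, 5, 10 and 15.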
A__ : List[Any] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def a__ ( lowerCAmelCase : Dict , lowerCAmelCase : Dict ):
'''simple docstring'''
try:
UpperCAmelCase__ : Tuple = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
F" {n_student}" )
return list(range(lowerCAmelCase ) )
def a__ ( lowerCAmelCase : int , lowerCAmelCase : Tuple ):
'''simple docstring'''
if n_student > n_teacher:
raise ValueError(F"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}" )
elif n_teacher == n_student:
return list(range(lowerCAmelCase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def a__ ( lowerCAmelCase : Union[str, PreTrainedModel] , lowerCAmelCase : Union[str, Path] = "student" , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : List[str]=False , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : List[str] , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCAmelCase , lowerCAmelCase ):
AutoTokenizer.from_pretrained(lowerCAmelCase ).save_pretrained(lowerCAmelCase ) # purely for convenience
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).eval()
else:
assert isinstance(lowerCAmelCase , lowerCAmelCase ), F"teacher must be a model or string got type {type(lowerCAmelCase )}"
UpperCAmelCase__ : int = teacher.config.to_diff_dict()
try:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
UpperCAmelCase__ : Tuple = teacher_e
if d is None:
UpperCAmelCase__ : str = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
UpperCAmelCase__ : Optional[Any] = teacher_e
if d is None:
UpperCAmelCase__ : Optional[Any] = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCAmelCase )
# Copy weights
UpperCAmelCase__ : Tuple = teacher.config_class(**lowerCAmelCase )
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_config(lowerCAmelCase )
    # Start by copying the full teacher state dict; this copies the first N teacher layers to the student.
UpperCAmelCase__ : Optional[int] = student.load_state_dict(teacher.state_dict() , strict=lowerCAmelCase )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
UpperCAmelCase__ , UpperCAmelCase__ : int = list(range(lowerCAmelCase ) ), list(range(lowerCAmelCase ) )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
F" {save_path}" )
student.save_pretrained(lowerCAmelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
if d_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
try:
if hasattr(
lowerCAmelCase , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCAmelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCAmelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCAmelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCAmelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCAmelCase )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCAmelCase )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
UpperCAmelCase__ : int = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(lowerCAmelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 660 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A__ : Optional[Any] = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[Any] = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Tuple = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[Any] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Dict = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
A__ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 660 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowercase ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self )-> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Tuple = ort.SessionOptions()
UpperCAmelCase__ : List[str] = False
return options
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : str = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : Tuple = np.random.RandomState(0 )
UpperCAmelCase__ : Any = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : Tuple = output.images
UpperCAmelCase__ : Dict = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : Union[str, Any] = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
UpperCAmelCase__ : Optional[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : List[str] = np.random.RandomState(0 )
UpperCAmelCase__ : str = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : List[str] = output.images
UpperCAmelCase__ : List[Any] = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : int = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 660 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
A__ : int = {"""configuration_deit""": ["""DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DeiTConfig""", """DeiTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Tuple = ["""DeiTFeatureExtractor"""]
A__ : Union[str, Any] = ["""DeiTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[int] = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
A__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 660 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : Optional[int] = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'table-transformer'
_A = ['past_key_values']
_A = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=3 , __UpperCamelCase=1_00 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=True , __UpperCamelCase="relu" , __UpperCamelCase=2_56 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.02 , __UpperCamelCase=1.0 , __UpperCamelCase=False , __UpperCamelCase="sine" , __UpperCamelCase="resnet50" , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=0.1 , **__UpperCamelCase , )-> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase__ : Any = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : int = backbone_config.get("model_type" )
UpperCAmelCase__ : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase__ : int = config_class.from_dict(__UpperCamelCase )
# set timm attributes to None
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = None, None, None
UpperCAmelCase__ : Optional[int] = use_timm_backbone
UpperCAmelCase__ : Dict = backbone_config
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Any = num_queries
UpperCAmelCase__ : int = d_model
UpperCAmelCase__ : Optional[int] = encoder_ffn_dim
UpperCAmelCase__ : str = encoder_layers
UpperCAmelCase__ : Dict = encoder_attention_heads
UpperCAmelCase__ : Optional[Any] = decoder_ffn_dim
UpperCAmelCase__ : Tuple = decoder_layers
UpperCAmelCase__ : Optional[Any] = decoder_attention_heads
UpperCAmelCase__ : List[str] = dropout
UpperCAmelCase__ : Tuple = attention_dropout
UpperCAmelCase__ : List[Any] = activation_dropout
UpperCAmelCase__ : Dict = activation_function
UpperCAmelCase__ : Optional[Any] = init_std
UpperCAmelCase__ : List[str] = init_xavier_std
UpperCAmelCase__ : int = encoder_layerdrop
UpperCAmelCase__ : Tuple = decoder_layerdrop
UpperCAmelCase__ : int = encoder_layers
UpperCAmelCase__ : Dict = auxiliary_loss
UpperCAmelCase__ : Union[str, Any] = position_embedding_type
UpperCAmelCase__ : List[str] = backbone
UpperCAmelCase__ : List[Any] = use_pretrained_backbone
UpperCAmelCase__ : List[str] = dilation
# Hungarian matcher
UpperCAmelCase__ : Dict = class_cost
UpperCAmelCase__ : Any = bbox_cost
UpperCAmelCase__ : Tuple = giou_cost
# Loss coefficients
UpperCAmelCase__ : Any = mask_loss_coefficient
UpperCAmelCase__ : Dict = dice_loss_coefficient
UpperCAmelCase__ : Any = bbox_loss_coefficient
UpperCAmelCase__ : Tuple = giou_loss_coefficient
UpperCAmelCase__ : List[Any] = eos_coefficient
super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase )
@property
def lowerCAmelCase__ ( self )-> int:
return self.encoder_attention_heads
@property
def lowerCAmelCase__ ( self )-> int:
return self.d_model
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = version.parse('1.11' )
@property
def lowerCAmelCase__ ( self )-> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def lowerCAmelCase__ ( self )-> float:
return 1E-5
@property
def lowerCAmelCase__ ( self )-> int:
return 12
| 660 | 1 |
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
A__ : List[str] = logging.get_logger(__name__)
A__ : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
A__ : List[Any] = {
"""vocab_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json""",
},
"""merges_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""Salesforce/codegen-350M-mono""": (
"""https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"""
),
},
}
A__ : int = {
"""Salesforce/codegen-350M-mono""": 2_048,
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = ['input_ids', 'attention_mask']
_A = CodeGenTokenizer
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase="<|endoftext|>" , __UpperCamelCase="<|endoftext|>" , __UpperCamelCase="<|endoftext|>" , __UpperCamelCase=False , **__UpperCamelCase , )-> Dict:
super().__init__(
__UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , unk_token=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , **__UpperCamelCase , )
if kwargs.pop("add_bos_token" , __UpperCamelCase ):
UpperCAmelCase__ : Any = kwargs.pop("name_or_path" , "" )
raise ValueError(
"Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
"Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
F"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
F"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
"This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
" so that the fast tokenizer works correctly." )
UpperCAmelCase__ : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space:
UpperCAmelCase__ : Union[str, Any] = getattr(__UpperCamelCase , pre_tok_state.pop("type" ) )
UpperCAmelCase__ : Union[str, Any] = add_prefix_space
UpperCAmelCase__ : str = pre_tok_class(**__UpperCamelCase )
UpperCAmelCase__ : int = add_prefix_space
def lowerCAmelCase__ ( self , *__UpperCamelCase , **__UpperCamelCase )-> BatchEncoding:
UpperCAmelCase__ : Optional[Any] = kwargs.get("is_split_into_words" , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , *__UpperCamelCase , **__UpperCamelCase )-> BatchEncoding:
UpperCAmelCase__ : Optional[int] = kwargs.get("is_split_into_words" , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None )-> Tuple[str]:
UpperCAmelCase__ : Dict = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase )
return tuple(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> str:
UpperCAmelCase__ : int = super().decode(
token_ids=__UpperCamelCase , skip_special_tokens=__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase , **__UpperCamelCase , )
if truncate_before_pattern is not None and len(__UpperCamelCase ) > 0:
UpperCAmelCase__ : Tuple = self.truncate(__UpperCamelCase , __UpperCamelCase )
return decoded_text
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> Any:
        def find_re(string , pattern , start_pos ):
            m = pattern.search(string , start_pos )
            return m.start() if m else -1
UpperCAmelCase__ : str = [re.compile(__UpperCamelCase , re.MULTILINE ) for pattern in truncate_before_pattern]
UpperCAmelCase__ : List[str] = list(re.finditer("^print" , __UpperCamelCase , re.MULTILINE ) )
if len(__UpperCamelCase ) > 1:
UpperCAmelCase__ : Any = completion[: prints[1].start()]
UpperCAmelCase__ : Optional[Any] = list(re.finditer("^def" , __UpperCamelCase , re.MULTILINE ) )
if len(__UpperCamelCase ) > 1:
UpperCAmelCase__ : Dict = completion[: defs[1].start()]
UpperCAmelCase__ : str = 0
UpperCAmelCase__ : Any = [
pos for pos in [find_re(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) for terminal in terminals] if pos != -1
]
if len(__UpperCamelCase ) > 0:
return completion[: min(__UpperCamelCase )]
else:
return completion
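# Illustrative (assumed) effect of the truncation logic above: a decoded completion containing a
# second top-level "print" or "def" statement is cut just before it, and the earliest match of
# any caller-supplied truncate_before_pattern shortens it further.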
| 660 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
A__ : int = getLogger(__name__)
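# (Assumed) typical invocation, one process per GPU; the script name and data path are placeholders:
#   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 --data_dir cnn_dm --save_dir tmp_gen --fp16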
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : int = 8 , lowerCAmelCase : int = 1024 , lowerCAmelCase : List[Any]="val" , lowerCAmelCase : str=None , lowerCAmelCase : int=False , lowerCAmelCase : Dict="summarization" , lowerCAmelCase : int=None , lowerCAmelCase : List[str]=1 , lowerCAmelCase : Dict = None , lowerCAmelCase : List[str]="" , **lowerCAmelCase : int , ):
'''simple docstring'''
UpperCAmelCase__ : Dict = str(lowerCAmelCase )
assert local_rank is not None
torch.distributed.init_process_group(backend="nccl" , rank=lowerCAmelCase )
UpperCAmelCase__ : List[str] = Path(lowerCAmelCase )
UpperCAmelCase__ : str = save_dir.joinpath(F"rank_{local_rank}_output.json" )
torch.cuda.set_device(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).cuda()
if fpaa:
UpperCAmelCase__ : List[Any] = model.half()
# determine if we need to increase num_beams
use_task_specific_params(lowerCAmelCase , lowerCAmelCase ) # update config with task specific params
UpperCAmelCase__ : List[Any] = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
UpperCAmelCase__ : Any = num_return_sequences
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
if max_source_length is None:
UpperCAmelCase__ : int = tokenizer.model_max_length
if prefix is None:
UpperCAmelCase__ : Union[str, Any] = prefix or getattr(model.config , "prefix" , "" ) or ""
UpperCAmelCase__ : str = SeqaSeqDataset(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , max_target_length=1024 , type_path=lowerCAmelCase , n_obs=lowerCAmelCase , prefix=lowerCAmelCase , **lowerCAmelCase , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
UpperCAmelCase__ : Union[str, Any] = ds.make_sortish_sampler(lowerCAmelCase , distributed=lowerCAmelCase , add_extra_examples=lowerCAmelCase , shuffle=lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = DataLoader(lowerCAmelCase , sampler=lowerCAmelCase , batch_size=lowerCAmelCase , collate_fn=ds.collate_fn )
UpperCAmelCase__ : str = []
for batch in tqdm(lowerCAmelCase ):
UpperCAmelCase__ : Dict = model.generate(
input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=lowerCAmelCase , num_beams=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase__ : int = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase )
UpperCAmelCase__ : int = batch["ids"]
if num_return_sequences > 1:
UpperCAmelCase__ : str = chunks(lowerCAmelCase , lowerCAmelCase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(lowerCAmelCase ):
results.append({"pred": pred, "id": ids[i].item()} )
save_json(lowerCAmelCase , lowerCAmelCase )
return results, sampler.num_replicas
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : str = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" , type=lowerCAmelCase , help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" , type=lowerCAmelCase , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
parser.add_argument("--save_dir" , type=lowerCAmelCase , help="where to save" , default="tmp_gen" )
parser.add_argument("--max_source_length" , type=lowerCAmelCase , default=lowerCAmelCase )
parser.add_argument(
"--type_path" , type=lowerCAmelCase , default="test" , help="which subset to evaluate typically train/val/test" )
parser.add_argument("--task" , type=lowerCAmelCase , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=lowerCAmelCase , default=8 , required=lowerCAmelCase , help="batch size" )
parser.add_argument(
"--local_rank" , type=lowerCAmelCase , default=-1 , required=lowerCAmelCase , help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase , help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" , type=lowerCAmelCase , default=1 , required=lowerCAmelCase , help="How many sequences to return" )
parser.add_argument(
"--sync_timeout" , type=lowerCAmelCase , default=600 , required=lowerCAmelCase , help="How long should master process wait for other processes to finish." , )
parser.add_argument("--src_lang" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
parser.add_argument("--tgt_lang" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
parser.add_argument(
"--prefix" , type=lowerCAmelCase , required=lowerCAmelCase , default=lowerCAmelCase , help="will be added to the begininng of src examples" )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--debug" , action="store_true" )
UpperCAmelCase__ : Optional[int] = time.time()
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = parser.parse_known_args()
UpperCAmelCase__ : int = parse_numeric_n_bool_cl_kwargs(lowerCAmelCase )
if generate_kwargs and args.local_rank <= 0:
print(F"parsed the following generate kwargs: {generate_kwargs}" )
UpperCAmelCase__ : Dict = Path(args.save_dir + "_tmp" )
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase ) # this handles locking.
UpperCAmelCase__ : List[str] = list(json_save_dir.glob("rank_*.json" ) )
if intermediate_files:
raise ValueError(F"Found files at {json_save_dir} please move or remove them." )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
UpperCAmelCase__ : List[str] = {}
if args.src_lang is not None:
UpperCAmelCase__ : str = args.src_lang
if args.tgt_lang is not None:
UpperCAmelCase__ : List[str] = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = eval_data_dir(
args.data_dir , lowerCAmelCase , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=lowerCAmelCase , **lowerCAmelCase , )
if args.local_rank <= 0:
UpperCAmelCase__ : str = Path(args.save_dir )
save_dir.mkdir(exist_ok=lowerCAmelCase )
UpperCAmelCase__ : Tuple = gather_results_from_each_node(lowerCAmelCase , lowerCAmelCase , args.sync_timeout )
UpperCAmelCase__ : Union[str, Any] = combine_partial_results(lowerCAmelCase )
if args.num_return_sequences > 1:
UpperCAmelCase__ : int = save_dir.joinpath("pseudolabel_results.json" )
print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
save_json(lowerCAmelCase , lowerCAmelCase )
return
UpperCAmelCase__ : Optional[Any] = Path(args.data_dir ).joinpath(args.type_path + ".target" )
with open(lowerCAmelCase ) as f:
UpperCAmelCase__ : Optional[int] = [x.rstrip() for x in f.readlines()][: len(lowerCAmelCase )]
# Calculate metrics, save metrics, and save _generations.txt
UpperCAmelCase__ : List[Any] = "translation" in args.task
UpperCAmelCase__ : Optional[Any] = calculate_bleu if calc_bleu else calculate_rouge
UpperCAmelCase__ : Optional[Any] = "bleu" if calc_bleu else "rouge"
UpperCAmelCase__ : Dict = score_fn(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : List[Any] = len(lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = time.time() - start_time
UpperCAmelCase__ : Optional[int] = round(runtime / metrics["n_obs"] , 4 )
UpperCAmelCase__ : Tuple = num_replicas
# TODO(@stas00): add whatever metadata to metrics
UpperCAmelCase__ : Any = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" )
save_json(lowerCAmelCase , lowerCAmelCase , indent=lowerCAmelCase )
print(lowerCAmelCase )
write_txt_file(lowerCAmelCase , save_dir.joinpath(F"{args.type_path}_generations.txt" ) )
if args.debug:
write_txt_file(lowerCAmelCase , save_dir.joinpath(F"{args.type_path}.target" ) )
else:
shutil.rmtree(lowerCAmelCase )
def a__ ( lowerCAmelCase : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : str = []
for partial_result in partial_results:
records.extend(lowerCAmelCase )
UpperCAmelCase__ : Dict = sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x["id"] )
UpperCAmelCase__ : List[str] = [x["pred"] for x in records]
return preds
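# Note: each partial result handled above is expected to be a list of {"id": ..., "pred": ...}
# records written by a single rank; merging them and sorting by "id" restores the original
# dataset order before the predictions are extracted.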
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
'''simple docstring'''
# WAIT FOR lots of .json files
UpperCAmelCase__ : int = time.time()
logger.info("waiting for all nodes to finish" )
UpperCAmelCase__ : Dict = None
while (time.time() - start_wait) < timeout:
UpperCAmelCase__ : str = list(save_dir.glob("rank_*.json" ) )
if len(lowerCAmelCase ) < num_replicas:
continue
try:
# make sure all json files are fully saved
UpperCAmelCase__ : Union[str, Any] = lmap(lowerCAmelCase , lowerCAmelCase )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 660 | 1 |
"""simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 660 |
"""simple docstring"""
from timeit import timeit
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCAmelCase__ : Tuple = 0
while number:
number &= number - 1
result += 1
return result
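# Why the loop above yields the set-bit count: `number &= number - 1` clears the lowest set bit
# on every iteration, so the body runs exactly once per 1-bit. Worked example:
#   13 = 0b1101 -> 12 = 0b1100 -> 8 = 0b1000 -> 0   (three iterations, three set bits)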
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCAmelCase__ : Union[str, Any] = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def a__ ( ):
'''simple docstring'''
def do_benchmark(lowerCAmelCase : int ) -> None:
UpperCAmelCase__ : Dict = "import __main__ as z"
print(F"Benchmark when {number = }:" )
print(F"{get_set_bits_count_using_modulo_operator(lowerCAmelCase ) = }" )
UpperCAmelCase__ : Tuple = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=lowerCAmelCase )
print(F"timeit() runs in {timing} seconds" )
print(F"{get_set_bits_count_using_brian_kernighans_algorithm(lowerCAmelCase ) = }" )
UpperCAmelCase__ : Any = timeit(
"z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=lowerCAmelCase , )
print(F"timeit() runs in {timing} seconds" )
for number in (25, 37, 58, 0):
do_benchmark(lowerCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 660 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , )-> str:
UpperCAmelCase__ : List[Any] = parent
UpperCAmelCase__ : Tuple = 13
UpperCAmelCase__ : Optional[int] = 7
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : Dict = True
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : str = True
UpperCAmelCase__ : str = 99
UpperCAmelCase__ : Tuple = 32
UpperCAmelCase__ : Optional[int] = 2
UpperCAmelCase__ : List[str] = 4
UpperCAmelCase__ : str = 37
UpperCAmelCase__ : List[Any] = "gelu"
UpperCAmelCase__ : Union[str, Any] = 0.1
UpperCAmelCase__ : Tuple = 0.1
UpperCAmelCase__ : Union[str, Any] = 5_12
UpperCAmelCase__ : Union[str, Any] = 16
UpperCAmelCase__ : Dict = 2
UpperCAmelCase__ : str = 0.02
UpperCAmelCase__ : Any = 3
UpperCAmelCase__ : Optional[int] = 4
UpperCAmelCase__ : Dict = None
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : str = None
if self.use_input_mask:
UpperCAmelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : str = None
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Tuple = None
if self.use_labels:
UpperCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : str = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ : Dict = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> str:
UpperCAmelCase__ : Union[str, Any] = TFDistilBertModel(config=__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase__ : Any = model(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = [input_ids, input_mask]
UpperCAmelCase__ : Optional[int] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Dict:
UpperCAmelCase__ : int = TFDistilBertForMaskedLM(config=__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> List[str]:
UpperCAmelCase__ : Tuple = TFDistilBertForQuestionAnswering(config=__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
}
UpperCAmelCase__ : Any = model(__UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> int:
UpperCAmelCase__ : Optional[int] = self.num_labels
UpperCAmelCase__ : List[str] = TFDistilBertForSequenceClassification(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Any:
UpperCAmelCase__ : List[Any] = self.num_choices
UpperCAmelCase__ : Dict = TFDistilBertForMultipleChoice(__UpperCamelCase )
UpperCAmelCase__ : Dict = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ : str = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ : List[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
}
UpperCAmelCase__ : Tuple = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Tuple:
UpperCAmelCase__ : int = self.num_labels
UpperCAmelCase__ : Union[str, Any] = TFDistilBertForTokenClassification(__UpperCamelCase )
UpperCAmelCase__ : str = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase__ : int = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : Any = self.prepare_config_and_inputs()
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : Optional[Any] = config_and_inputs
UpperCAmelCase__ : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
_A = (
{
'feature-extraction': TFDistilBertModel,
'fill-mask': TFDistilBertForMaskedLM,
'question-answering': TFDistilBertForQuestionAnswering,
'text-classification': TFDistilBertForSequenceClassification,
'token-classification': TFDistilBertForTokenClassification,
'zero-shot': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_A = False
_A = False
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : Tuple = TFDistilBertModelTester(self )
UpperCAmelCase__ : List[Any] = ConfigTester(self , config_class=__UpperCamelCase , dim=37 )
def lowerCAmelCase__ ( self )-> str:
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__UpperCamelCase )
@slow
def lowerCAmelCase__ ( self )-> Optional[Any]:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
UpperCAmelCase__ : int = TFDistilBertModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@require_tf
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ : str = TFDistilBertModel.from_pretrained("distilbert-base-uncased" )
UpperCAmelCase__ : List[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ : Tuple = model(__UpperCamelCase )[0]
UpperCAmelCase__ : Optional[int] = [1, 6, 7_68]
self.assertEqual(output.shape , __UpperCamelCase )
UpperCAmelCase__ : str = tf.constant(
[
[
[0.1926_1885, -0.1373_2955, 0.411_9799],
[0.2215_0156, -0.0742_2661, 0.3903_7204],
[0.2275_6018, -0.089_6414, 0.370_1467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCamelCase , atol=1E-4 )
| 660 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _lowercase ( unittest.TestCase , lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Optional[Any] = load_tool("text-classification" )
self.tool.setup()
UpperCAmelCase__ : List[str] = load_tool("text-classification" , remote=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Dict = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : List[Any] = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Any = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : str = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
| 660 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
A__ : int = """\
"""
A__ : Optional[Any] = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
A__ : List[str] = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"input_texts": datasets.Value("string" ),
} ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 16 , __UpperCamelCase = True , __UpperCamelCase=None )-> Optional[int]:
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
UpperCAmelCase__ : str = "cuda"
else:
UpperCAmelCase__ : List[str] = "cuda" if torch.cuda.is_available() else "cpu"
UpperCAmelCase__ : int = AutoModelForCausalLM.from_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Dict = model.to(__UpperCamelCase )
UpperCAmelCase__ : List[str] = AutoTokenizer.from_pretrained(__UpperCamelCase )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
UpperCAmelCase__ : Dict = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(__UpperCamelCase ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
UpperCAmelCase__ : List[Any] = model.config.max_length - 1
else:
UpperCAmelCase__ : Tuple = model.config.max_length
UpperCAmelCase__ : Union[str, Any] = tokenizer(
__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors="pt" , return_attention_mask=__UpperCamelCase , ).to(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = encodings["input_ids"]
UpperCAmelCase__ : str = encodings["attention_mask"]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
UpperCAmelCase__ : Tuple = []
UpperCAmelCase__ : Any = CrossEntropyLoss(reduction="none" )
for start_index in logging.tqdm(range(0 , len(__UpperCamelCase ) , __UpperCamelCase ) ):
UpperCAmelCase__ : Any = min(start_index + batch_size , len(__UpperCamelCase ) )
UpperCAmelCase__ : List[Any] = encoded_texts[start_index:end_index]
UpperCAmelCase__ : Union[str, Any] = attn_masks[start_index:end_index]
if add_start_token:
UpperCAmelCase__ : Union[str, Any] = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__UpperCamelCase )
UpperCAmelCase__ : Tuple = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
UpperCAmelCase__ : List[Any] = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(__UpperCamelCase ), attn_mask] , dim=1 )
UpperCAmelCase__ : str = encoded_batch
with torch.no_grad():
UpperCAmelCase__ : Dict = model(__UpperCamelCase , attention_mask=__UpperCamelCase ).logits
UpperCAmelCase__ : Any = out_logits[..., :-1, :].contiguous()
UpperCAmelCase__ : Optional[Any] = labels[..., 1:].contiguous()
UpperCAmelCase__ : Union[str, Any] = attn_mask[..., 1:].contiguous()
UpperCAmelCase__ : List[Any] = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , __UpperCamelCase ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(__UpperCamelCase )}
| 660 |
"""simple docstring"""
def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ):
'''simple docstring'''
_validate_point(lowerCAmelCase )
_validate_point(lowerCAmelCase )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(lowerCAmelCase , lowerCAmelCase ) ) )
def a__ ( lowerCAmelCase : list[float] ):
'''simple docstring'''
if point:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
for item in point:
if not isinstance(lowerCAmelCase , (int, float) ):
UpperCAmelCase__ : Tuple = (
"Expected a list of numbers as input, found "
F"{type(lowerCAmelCase ).__name__}"
)
raise TypeError(lowerCAmelCase )
else:
UpperCAmelCase__ : Dict = F"Expected a list of numbers as input, found {type(lowerCAmelCase ).__name__}"
raise TypeError(lowerCAmelCase )
else:
raise ValueError("Missing an input" )
def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ):
'''simple docstring'''
_validate_point(lowerCAmelCase )
_validate_point(lowerCAmelCase )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(lowerCAmelCase , lowerCAmelCase ) ) )
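# Example: for the points [1, 1] and [9, 9] both helpers above return 16.0, i.e. |1 - 9| + |1 - 9|.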
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 1 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
A__ : List[str] = datasets.utils.logging.get_logger(__name__)
@dataclass
class _lowercase ( datasets.BuilderConfig ):
'''simple docstring'''
_A = 1_0000
_A = None
_A = None
class _lowercase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
_A = ParquetConfig
def lowerCAmelCase__ ( self )-> List[str]:
return datasets.DatasetInfo(features=self.config.features )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
UpperCAmelCase__ : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__UpperCamelCase , (str, list, tuple) ):
UpperCAmelCase__ : List[Any] = data_files
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : Optional[int] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase__ : Any = [dl_manager.iter_files(__UpperCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
UpperCAmelCase__ : Any = []
for split_name, files in data_files.items():
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : Any = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase__ : Any = [dl_manager.iter_files(__UpperCamelCase ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(__UpperCamelCase ):
with open(__UpperCamelCase , "rb" ) as f:
UpperCAmelCase__ : Optional[Any] = datasets.Features.from_arrow_schema(pq.read_schema(__UpperCamelCase ) )
break
splits.append(datasets.SplitGenerator(name=__UpperCamelCase , gen_kwargs={"files": files} ) )
return splits
def lowerCAmelCase__ ( self , __UpperCamelCase )-> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCAmelCase__ : Union[str, Any] = table_cast(__UpperCamelCase , self.info.features.arrow_schema )
return pa_table
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : Union[str, Any] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" )
for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCamelCase ) ):
with open(__UpperCamelCase , "rb" ) as f:
UpperCAmelCase__ : Any = pq.ParquetFile(__UpperCamelCase )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
UpperCAmelCase__ : Union[str, Any] = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"{file_idx}_{batch_idx}", self._cast_table(__UpperCamelCase )
except ValueError as e:
logger.error(F"Failed to read file '{file}' with error {type(__UpperCamelCase )}: {e}" )
raise
| 660 |
"""simple docstring"""
import math
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a__ ( lowerCAmelCase : int = 1_0001 ):
'''simple docstring'''
try:
UpperCAmelCase__ : List[str] = int(lowerCAmelCase )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
UpperCAmelCase__ : list[int] = []
UpperCAmelCase__ : str = 2
while len(lowerCAmelCase ) < nth:
if is_prime(lowerCAmelCase ):
primes.append(lowerCAmelCase )
num += 1
else:
num += 1
return primes[len(lowerCAmelCase ) - 1]
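# Example: the default call computes the 10001st prime; invoked as solution(6) (the name used
# in the main guard below) it would return 13, the sixth prime (2, 3, 5, 7, 11, 13).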
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 | 1 |
"""simple docstring"""
def a__ ( lowerCAmelCase : list ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = len(lowerCAmelCase )
for i in range(1 , lowerCAmelCase ):
UpperCAmelCase__ : List[Any] = collection[i]
UpperCAmelCase__ : List[Any] = 0
UpperCAmelCase__ : str = i - 1
while low <= high:
UpperCAmelCase__ : List[Any] = (low + high) // 2
if val < collection[mid]:
UpperCAmelCase__ : int = mid - 1
else:
UpperCAmelCase__ : Optional[int] = mid + 1
for j in range(lowerCAmelCase , lowerCAmelCase , -1 ):
UpperCAmelCase__ : Tuple = collection[j - 1]
UpperCAmelCase__ : Dict = val
return collection
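# Example: binary_insertion_sort([5, 2, 4, 6, 1, 3]) returns [1, 2, 3, 4, 5, 6]. The binary
# search only locates the insertion index; the shifting loop keeps the overall sort O(n^2).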
if __name__ == "__main__":
A__ : Any = input("""Enter numbers separated by a comma:\n""").strip()
A__ : Union[str, Any] = [int(item) for item in user_input.split(""",""")]
print(binary_insertion_sort(unsorted))
| 660 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=30 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=10 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=0.6 , __UpperCamelCase=None , )-> List[Any]:
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Optional[Any] = batch_size
UpperCAmelCase__ : Any = image_size
UpperCAmelCase__ : Dict = patch_size
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Union[str, Any] = is_training
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : int = mask_ratio
UpperCAmelCase__ : Tuple = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCAmelCase__ : int = (image_size // patch_size) ** 2
UpperCAmelCase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self )-> int:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = ViTMAEModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : List[Any] = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : Optional[int] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCAmelCase__ : List[str] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase__ : Dict = 1
UpperCAmelCase__ : str = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = config_and_inputs
UpperCAmelCase__ : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_A = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
_A = False
_A = False
_A = False
_A = False
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Any = ViTMAEModelTester(self )
UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def lowerCAmelCase__ ( self )-> int:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def lowerCAmelCase__ ( self )-> Dict:
pass
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class(__UpperCamelCase )
UpperCAmelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Dict = [*signature.parameters.keys()]
UpperCAmelCase__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Dict:
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCAmelCase__ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase__ : str = torch.from_numpy(__UpperCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase__ : Optional[Any] = pt_noise
super().check_pt_tf_models(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[Any] = outputs[0].cpu().numpy()
UpperCAmelCase__ : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = model_class.from_pretrained(__UpperCamelCase )
model.to(__UpperCamelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
# Make sure we don't have nans
UpperCAmelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__UpperCamelCase , 1E-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> List[str]:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Any:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Optional[Any]:
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def lowerCAmelCase__ ( self )-> List[Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
pass
@slow
def lowerCAmelCase__ ( self )-> Union[str, Any]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = ViTMAEModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self )-> List[Any]:
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self )-> Optional[int]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCAmelCase__ : Any = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__UpperCamelCase )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : Optional[Any] = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase__ : List[Any] = ViTMAEConfig()
UpperCAmelCase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase__ : Optional[int] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**__UpperCamelCase , noise=torch.from_numpy(__UpperCamelCase ).to(device=__UpperCamelCase ) )
# verify the logits
UpperCAmelCase__ : str = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__UpperCamelCase ) , atol=1E-4 ) )
| 660 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
A__ : List[str] = TypeVar("""T""")
class _lowercase ( Generic[T] ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase )-> None:
UpperCAmelCase__ : Any | T = None
UpperCAmelCase__ : int = len(__UpperCamelCase )
UpperCAmelCase__ : list[T] = [any_type for _ in range(self.N )] + arr
UpperCAmelCase__ : Tuple = fnc
self.build()
def lowerCAmelCase__ ( self )-> None:
for p in range(self.N - 1 , 0 , -1 ):
UpperCAmelCase__ : Dict = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> None:
p += self.N
UpperCAmelCase__ : Any = v
while p > 1:
UpperCAmelCase__ : str = p // 2
UpperCAmelCase__ : Tuple = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> T | None: # noqa: E741
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = l + self.N, r + self.N
UpperCAmelCase__ : T | None = None
while l <= r:
if l % 2 == 1:
UpperCAmelCase__ : Union[str, Any] = self.st[l] if res is None else self.fn(__UpperCamelCase , self.st[l] )
if r % 2 == 0:
UpperCAmelCase__ : Any = self.st[r] if res is None else self.fn(__UpperCamelCase , self.st[r] )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = (l + 1) // 2, (r - 1) // 2
return res
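# Example (indices are 0-based and inclusive, matching the test harness below): with the
# array starting [1, 10, -2, 9, ...] and fnc=min, query(0, 2) reduces arr[0..2] and returns -2.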
if __name__ == "__main__":
from functools import reduce
A__ : Optional[Any] = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
A__ : List[str] = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
A__ : str = SegmentTree(test_array, min)
A__ : Tuple = SegmentTree(test_array, max)
A__ : List[str] = SegmentTree(test_array, lambda a, b: a + b)
def a__ ( ):
'''simple docstring'''
for i in range(len(lowerCAmelCase ) ):
for j in range(lowerCAmelCase , len(lowerCAmelCase ) ):
UpperCAmelCase__ : int = reduce(lowerCAmelCase , test_array[i : j + 1] )
UpperCAmelCase__ : Tuple = reduce(lowerCAmelCase , test_array[i : j + 1] )
UpperCAmelCase__ : List[Any] = reduce(lambda lowerCAmelCase , lowerCAmelCase : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(lowerCAmelCase , lowerCAmelCase )
assert max_range == max_segment_tree.query(lowerCAmelCase , lowerCAmelCase )
assert sum_range == sum_segment_tree.query(lowerCAmelCase , lowerCAmelCase )
test_all_segments()
for index, value in test_updates.items():
A__ : int = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 660 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _lowercase :
'''simple docstring'''
_A = 42
# setable values
_A = 42
_A = 42
_A = None
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
return cls(common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase )
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
_A = [e.name for e in FlaxKarrasDiffusionSchedulers]
_A = 42
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return True
@register_to_config
def __init__( self , __UpperCamelCase = 10_00 , __UpperCamelCase = 0.0001 , __UpperCamelCase = 0.02 , __UpperCamelCase = "linear" , __UpperCamelCase = None , __UpperCamelCase = "fixed_small" , __UpperCamelCase = True , __UpperCamelCase = "epsilon" , __UpperCamelCase = jnp.floataa , )-> List[str]:
UpperCAmelCase__ : int = dtype
def lowerCAmelCase__ ( self , __UpperCamelCase = None )-> DDPMSchedulerState:
if common is None:
UpperCAmelCase__ : int = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype )
UpperCAmelCase__ : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None )-> jnp.ndarray:
return sample
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = () )-> DDPMSchedulerState:
UpperCAmelCase__ : Dict = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
UpperCAmelCase__ : Optional[int] = (jnp.arange(0 , __UpperCamelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase__ : Any = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
UpperCAmelCase__ : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCAmelCase__ : Dict = jnp.clip(__UpperCamelCase , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCAmelCase__ : Tuple = jnp.log(jnp.clip(__UpperCamelCase , a_min=1E-20 ) )
elif variance_type == "fixed_large":
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCAmelCase__ : List[str] = variance
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
UpperCAmelCase__ : Optional[int] = (predicted_variance + 1) / 2
UpperCAmelCase__ : Any = frac * max_log + (1 - frac) * min_log
return variance
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True , )-> Union[FlaxDDPMSchedulerOutput, Tuple]:
UpperCAmelCase__ : List[str] = timestep
if key is None:
UpperCAmelCase__ : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = jnp.split(__UpperCamelCase , sample.shape[1] , axis=1 )
else:
UpperCAmelCase__ : Optional[Any] = None
# 1. compute alphas, betas
UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCAmelCase__ : Union[str, Any] = 1 - alpha_prod_t
UpperCAmelCase__ : Tuple = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ : Any = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ : List[Any] = jnp.clip(__UpperCamelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase__ : List[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
UpperCAmelCase__ : Any = jax.random.split(__UpperCamelCase , num=1 )
UpperCAmelCase__ : int = jax.random.normal(__UpperCamelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__UpperCamelCase , __UpperCamelCase , predicted_variance=__UpperCamelCase ) ** 0.5) * noise
UpperCAmelCase__ : Dict = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCAmelCase__ : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__UpperCamelCase , state=__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return add_noise_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return get_velocity_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __len__( self )-> Tuple:
return self.config.num_train_timesteps
| 660 | 1 |
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
A__ : Union[str, Any] = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
A__ : List[Any] = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
A__ : Tuple = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
] , )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase="uniform_average" , __UpperCamelCase=True )-> int:
UpperCAmelCase__ : List[str] = mean_squared_error(
__UpperCamelCase , __UpperCamelCase , sample_weight=__UpperCamelCase , multioutput=__UpperCamelCase , squared=__UpperCamelCase )
return {"mse": mse}
| 660 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ''
_A = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> str:
super().__init__(self , **__UpperCamelCase )
UpperCAmelCase__ : int = repo_info
UpperCAmelCase__ : Optional[int] = token
UpperCAmelCase__ : Optional[Any] = None
def lowerCAmelCase__ ( self )-> Optional[Any]:
if self.dir_cache is None:
UpperCAmelCase__ : str = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCAmelCase__ : str = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"name": str(__UpperCamelCase ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , )-> List[Any]:
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
UpperCAmelCase__ : Union[str, Any] = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def lowerCAmelCase__ ( self , __UpperCamelCase , **__UpperCamelCase )-> List[str]:
self._get_dirs()
UpperCAmelCase__ : Union[str, Any] = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase )-> str:
self._get_dirs()
UpperCAmelCase__ : str = PurePosixPath(path.strip("/" ) )
UpperCAmelCase__ : Optional[Any] = {}
for p, f in self.dir_cache.items():
UpperCAmelCase__ : Optional[int] = PurePosixPath(p.strip("/" ) )
UpperCAmelCase__ : Dict = p.parent
if root == path:
UpperCAmelCase__ : Tuple = f
UpperCAmelCase__ : List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
| 660 | 1 |
"""simple docstring"""
import numpy as np
A__ : Tuple = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
class _lowercase :
'''simple docstring'''
def __init__( self )-> None:
UpperCAmelCase__ : List[Any] = np.array(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> np.ndarray:
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = np.where(letter == self.SQUARE )
UpperCAmelCase__ : str = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> str:
UpperCAmelCase__ : str = self.SQUARE[indexa - 1, indexa - 1]
return letter
def lowerCAmelCase__ ( self , __UpperCamelCase )-> str:
UpperCAmelCase__ : Union[str, Any] = message.lower()
UpperCAmelCase__ : str = message.replace(" " , "" )
UpperCAmelCase__ : str = message.replace("j" , "i" )
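        # Classic Polybius square: "j" is folded into "i" so the 25-letter alphabet fits the 5x5 grid above.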
UpperCAmelCase__ : Tuple = np.empty((2, len(__UpperCamelCase )) )
for letter_index in range(len(__UpperCamelCase ) ):
UpperCAmelCase__ : Any = self.letter_to_numbers(message[letter_index] )
UpperCAmelCase__ : Optional[Any] = numbers[0]
UpperCAmelCase__ : Tuple = numbers[1]
UpperCAmelCase__ : Union[str, Any] = first_step.reshape(2 * len(__UpperCamelCase ) )
UpperCAmelCase__ : Tuple = ""
for numbers_index in range(len(__UpperCamelCase ) ):
UpperCAmelCase__ : Tuple = int(second_step[numbers_index * 2] )
UpperCAmelCase__ : Optional[int] = int(second_step[(numbers_index * 2) + 1] )
UpperCAmelCase__ : List[str] = self.numbers_to_letter(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = encoded_message + letter
return encoded_message
def lowerCAmelCase__ ( self , __UpperCamelCase )-> str:
UpperCAmelCase__ : Any = message.lower()
        UpperCAmelCase__ : str = message.replace(" " , "" )
UpperCAmelCase__ : List[Any] = np.empty(2 * len(__UpperCamelCase ) )
for letter_index in range(len(__UpperCamelCase ) ):
UpperCAmelCase__ : Optional[int] = self.letter_to_numbers(message[letter_index] )
UpperCAmelCase__ : Union[str, Any] = numbers[0]
UpperCAmelCase__ : Optional[Any] = numbers[1]
UpperCAmelCase__ : Union[str, Any] = first_step.reshape((2, len(__UpperCamelCase )) )
UpperCAmelCase__ : int = ""
for numbers_index in range(len(__UpperCamelCase ) ):
UpperCAmelCase__ : Any = int(second_step[0, numbers_index] )
UpperCAmelCase__ : Optional[int] = int(second_step[1, numbers_index] )
UpperCAmelCase__ : Dict = self.numbers_to_letter(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Dict = decoded_message + letter
return decoded_message
| 660 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
A__ : Dict = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
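    # Coerce the input into a batch of videos: a list of videos, each of which is a list of frames.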
if isinstance(lowerCAmelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowerCAmelCase , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowerCAmelCase ):
return [[videos]]
raise ValueError(F"Could not make batched video from {videos}" )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ['pixel_values']
def __init__( self , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = 1 / 2_55 , __UpperCamelCase = True , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> None:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
UpperCAmelCase__ : List[Any] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase__ : int = get_size_dict(__UpperCamelCase , param_name="crop_size" )
UpperCAmelCase__ : Dict = do_resize
UpperCAmelCase__ : Optional[int] = size
UpperCAmelCase__ : List[Any] = do_center_crop
UpperCAmelCase__ : str = crop_size
UpperCAmelCase__ : Optional[int] = resample
UpperCAmelCase__ : int = do_rescale
UpperCAmelCase__ : Union[str, Any] = rescale_factor
UpperCAmelCase__ : Union[str, Any] = offset
UpperCAmelCase__ : Dict = do_normalize
UpperCAmelCase__ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase__ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" in size:
UpperCAmelCase__ : Union[str, Any] = get_resize_output_image_size(__UpperCamelCase , size["shortest_edge"] , default_to_square=__UpperCamelCase )
elif "height" in size and "width" in size:
UpperCAmelCase__ : Any = (size["height"], size["width"])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[Any] = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(__UpperCamelCase , size=(size["height"], size["width"]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , )-> Tuple:
UpperCAmelCase__ : str = image.astype(np.floataa )
if offset:
UpperCAmelCase__ : Tuple = image - (scale / 2)
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , )-> np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase__ : Optional[Any] = to_numpy_array(__UpperCamelCase )
if do_resize:
UpperCAmelCase__ : Union[str, Any] = self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase )
if do_center_crop:
UpperCAmelCase__ : int = self.center_crop(__UpperCamelCase , size=__UpperCamelCase )
if do_rescale:
UpperCAmelCase__ : List[str] = self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase , offset=__UpperCamelCase )
if do_normalize:
UpperCAmelCase__ : List[Any] = self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase )
UpperCAmelCase__ : Dict = to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase )
return image
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , )-> PIL.Image.Image:
UpperCAmelCase__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : int = resample if resample is not None else self.resample
UpperCAmelCase__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : int = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : Optional[int] = offset if offset is not None else self.offset
UpperCAmelCase__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Dict = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : Optional[int] = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : List[str] = size if size is not None else self.size
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : Dict = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : Tuple = get_size_dict(__UpperCamelCase , param_name="crop_size" )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase__ : List[str] = make_batched(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = [
[
self._preprocess_image(
image=__UpperCamelCase , do_resize=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , do_center_crop=__UpperCamelCase , crop_size=__UpperCamelCase , do_rescale=__UpperCamelCase , rescale_factor=__UpperCamelCase , offset=__UpperCamelCase , do_normalize=__UpperCamelCase , image_mean=__UpperCamelCase , image_std=__UpperCamelCase , data_format=__UpperCamelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase__ : Dict = {"pixel_values": videos}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
| 660 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : Optional[int] = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'table-transformer'
_A = ['past_key_values']
_A = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=3 , __UpperCamelCase=1_00 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=True , __UpperCamelCase="relu" , __UpperCamelCase=2_56 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.02 , __UpperCamelCase=1.0 , __UpperCamelCase=False , __UpperCamelCase="sine" , __UpperCamelCase="resnet50" , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=0.1 , **__UpperCamelCase , )-> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase__ : Any = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : int = backbone_config.get("model_type" )
UpperCAmelCase__ : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase__ : int = config_class.from_dict(__UpperCamelCase )
# set timm attributes to None
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = None, None, None
UpperCAmelCase__ : Optional[int] = use_timm_backbone
UpperCAmelCase__ : Dict = backbone_config
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Any = num_queries
UpperCAmelCase__ : int = d_model
UpperCAmelCase__ : Optional[int] = encoder_ffn_dim
UpperCAmelCase__ : str = encoder_layers
UpperCAmelCase__ : Dict = encoder_attention_heads
UpperCAmelCase__ : Optional[Any] = decoder_ffn_dim
UpperCAmelCase__ : Tuple = decoder_layers
UpperCAmelCase__ : Optional[Any] = decoder_attention_heads
UpperCAmelCase__ : List[str] = dropout
UpperCAmelCase__ : Tuple = attention_dropout
UpperCAmelCase__ : List[Any] = activation_dropout
UpperCAmelCase__ : Dict = activation_function
UpperCAmelCase__ : Optional[Any] = init_std
UpperCAmelCase__ : List[str] = init_xavier_std
UpperCAmelCase__ : int = encoder_layerdrop
UpperCAmelCase__ : Tuple = decoder_layerdrop
UpperCAmelCase__ : int = encoder_layers
UpperCAmelCase__ : Dict = auxiliary_loss
UpperCAmelCase__ : Union[str, Any] = position_embedding_type
UpperCAmelCase__ : List[str] = backbone
UpperCAmelCase__ : List[Any] = use_pretrained_backbone
UpperCAmelCase__ : List[str] = dilation
# Hungarian matcher
UpperCAmelCase__ : Dict = class_cost
UpperCAmelCase__ : Any = bbox_cost
UpperCAmelCase__ : Tuple = giou_cost
# Loss coefficients
UpperCAmelCase__ : Any = mask_loss_coefficient
UpperCAmelCase__ : Dict = dice_loss_coefficient
UpperCAmelCase__ : Any = bbox_loss_coefficient
UpperCAmelCase__ : Tuple = giou_loss_coefficient
UpperCAmelCase__ : List[Any] = eos_coefficient
super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase )
@property
def lowerCAmelCase__ ( self )-> int:
return self.encoder_attention_heads
@property
def lowerCAmelCase__ ( self )-> int:
return self.d_model
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = version.parse('1.11' )
@property
def lowerCAmelCase__ ( self )-> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def lowerCAmelCase__ ( self )-> float:
return 1E-5
@property
def lowerCAmelCase__ ( self )-> int:
return 12
| 660 |
"""simple docstring"""
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if a < 0:
raise ValueError("Input value must be a positive integer" )
    elif not isinstance(lowerCAmelCase , int ):
        raise TypeError("Input value must be an 'int' type" )
return bin(lowerCAmelCase ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 1 |
"""simple docstring"""
from __future__ import annotations
def a__ ( lowerCAmelCase : list[int] ):
'''simple docstring'''
if len(lowerCAmelCase ) == 0:
return array
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = min(lowerCAmelCase ), max(lowerCAmelCase )
# Compute the variables
UpperCAmelCase__ : Tuple = _max - _min + 1
UpperCAmelCase__ , UpperCAmelCase__ : Dict = [0] * holes_range, [0] * holes_range
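    # "holes" remembers the value mapped to each offset from the minimum, "holes_repeat" counts how often it occurs.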
# Make the sorting.
for i in array:
UpperCAmelCase__ : List[str] = i - _min
UpperCAmelCase__ : Union[str, Any] = i
holes_repeat[index] += 1
    # Rebuild the sorted array by writing each stored value back as many times as it occurred.
UpperCAmelCase__ : Union[str, Any] = 0
for i in range(lowerCAmelCase ):
while holes_repeat[i] > 0:
UpperCAmelCase__ : Optional[int] = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
A__ : Any = input("""Enter numbers separated by comma:\n""")
A__ : str = [int(x) for x in user_input.split(""",""")]
print(pigeon_sort(unsorted))
| 660 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
A__ : Optional[Any] = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : bool , lowerCAmelCase : bool ):
'''simple docstring'''
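    # Decorator factory: wraps the benchmarked callable to run either eagerly or as a tf.function (optionally XLA-compiled), depending on the benchmark arguments.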
def run_func(lowerCAmelCase : Dict ):
@wraps(lowerCAmelCase )
def run_in_eager_mode(*lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Dict ):
return func(*lowerCAmelCase , **lowerCAmelCase )
@wraps(lowerCAmelCase )
@tf.function(experimental_compile=lowerCAmelCase )
def run_in_graph_mode(*lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[Any] ):
return func(*lowerCAmelCase , **lowerCAmelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def a__ ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = random.Random()
UpperCAmelCase__ : List[str] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(lowerCAmelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = 42
_A = "TensorFlow"
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return tf.__version__
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
# initialize GPU on separate process
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Union[str, Any] = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
UpperCAmelCase__ : List[Any] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : List[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : List[str] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Any = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Optional[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : Union[str, Any] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Optional[int] = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : str = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : Any = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : List[Any] = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Dict = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : Any = TF_MODEL_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : int = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Optional[Any] = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , training=__UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__UpperCamelCase , training=__UpperCamelCase )
UpperCAmelCase__ : Dict = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : List[Any] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Any = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : Any = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : int = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : int = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : List[str] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : Union[str, Any] = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Dict = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Union[str, Any] = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Any = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
UpperCAmelCase__ : str = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCAmelCase__ ( self , __UpperCamelCase )-> float:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(__UpperCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCAmelCase__ : Optional[Any] = timeit.repeat(
__UpperCamelCase , repeat=self.args.repeat , number=10 , )
return min(__UpperCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> [Memory, MemorySummary]:
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
UpperCAmelCase__ : List[str] = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
UpperCAmelCase__ : Optional[int] = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
UpperCAmelCase__ : Any = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
UpperCAmelCase__ : Optional[int] = nvml.nvmlDeviceGetMemoryInfo(__UpperCamelCase )
UpperCAmelCase__ : str = meminfo.used
UpperCAmelCase__ : int = Memory(__UpperCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
UpperCAmelCase__ : Any = None
else:
UpperCAmelCase__ : List[Any] = measure_peak_memory_cpu(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = Memory(__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCAmelCase__ : Optional[Any] = stop_memory_tracing(__UpperCamelCase )
if memory is None:
UpperCAmelCase__ : Tuple = summary.total
else:
UpperCAmelCase__ : int = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
return "N/A", None
| 660 | 1 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class _lowercase :
'''simple docstring'''
@staticmethod
def lowerCAmelCase__ ( *__UpperCamelCase , **__UpperCamelCase )-> str:
pass
def a__ ( lowerCAmelCase : Image ):
'''simple docstring'''
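    # Fingerprint the raw image bytes (first 10 hex chars of the digest) so large masks can be compared compactly in the expected outputs.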
UpperCAmelCase__ : Tuple = hashlib.mda(image.tobytes() )
return m.hexdigest()[:10]
def a__ ( lowerCAmelCase : Image ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = np.array(lowerCAmelCase )
UpperCAmelCase__ : List[str] = npimg.shape
return {"hash": hashimage(lowerCAmelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
_A = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
_A = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> int:
UpperCAmelCase__ : Union[str, Any] = MaskGenerationPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> int:
pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
def lowerCAmelCase__ ( self )-> List[str]:
pass
@slow
@require_torch
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Union[str, Any] = pipeline("mask-generation" , model="facebook/sam-vit-huge" )
UpperCAmelCase__ : Any = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=2_56 )
# Shortening by hashing
UpperCAmelCase__ : Dict = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(__UpperCamelCase ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (4_80, 6_40)}, "scores": 1.0444},
{"mask": {"hash": "6affa964c6", "shape": (4_80, 6_40)}, "scores": 1.021},
{"mask": {"hash": "dfe28a0388", "shape": (4_80, 6_40)}, "scores": 1.0167},
{"mask": {"hash": "c0a5f4a318", "shape": (4_80, 6_40)}, "scores": 1.0132},
{"mask": {"hash": "fe8065c197", "shape": (4_80, 6_40)}, "scores": 1.0053},
{"mask": {"hash": "e2d0b7a0b7", "shape": (4_80, 6_40)}, "scores": 0.9967},
{"mask": {"hash": "453c7844bd", "shape": (4_80, 6_40)}, "scores": 0.993},
{"mask": {"hash": "3d44f2926d", "shape": (4_80, 6_40)}, "scores": 0.9909},
{"mask": {"hash": "64033ddc3f", "shape": (4_80, 6_40)}, "scores": 0.9879},
{"mask": {"hash": "801064ff79", "shape": (4_80, 6_40)}, "scores": 0.9834},
{"mask": {"hash": "6172f276ef", "shape": (4_80, 6_40)}, "scores": 0.9716},
{"mask": {"hash": "b49e60e084", "shape": (4_80, 6_40)}, "scores": 0.9612},
{"mask": {"hash": "a811e775fd", "shape": (4_80, 6_40)}, "scores": 0.9599},
{"mask": {"hash": "a6a8ebcf4b", "shape": (4_80, 6_40)}, "scores": 0.9552},
{"mask": {"hash": "9d8257e080", "shape": (4_80, 6_40)}, "scores": 0.9532},
{"mask": {"hash": "32de6454a8", "shape": (4_80, 6_40)}, "scores": 0.9516},
{"mask": {"hash": "af3d4af2c8", "shape": (4_80, 6_40)}, "scores": 0.9499},
{"mask": {"hash": "3c6db475fb", "shape": (4_80, 6_40)}, "scores": 0.9483},
{"mask": {"hash": "c290813fb9", "shape": (4_80, 6_40)}, "scores": 0.9464},
{"mask": {"hash": "b6f0b8f606", "shape": (4_80, 6_40)}, "scores": 0.943},
{"mask": {"hash": "92ce16bfdf", "shape": (4_80, 6_40)}, "scores": 0.943},
{"mask": {"hash": "c749b25868", "shape": (4_80, 6_40)}, "scores": 0.9408},
{"mask": {"hash": "efb6cab859", "shape": (4_80, 6_40)}, "scores": 0.9335},
{"mask": {"hash": "1ff2eafb30", "shape": (4_80, 6_40)}, "scores": 0.9326},
{"mask": {"hash": "788b798e24", "shape": (4_80, 6_40)}, "scores": 0.9262},
{"mask": {"hash": "abea804f0e", "shape": (4_80, 6_40)}, "scores": 0.8999},
{"mask": {"hash": "7b9e8ddb73", "shape": (4_80, 6_40)}, "scores": 0.8986},
{"mask": {"hash": "cd24047c8a", "shape": (4_80, 6_40)}, "scores": 0.8984},
{"mask": {"hash": "6943e6bcbd", "shape": (4_80, 6_40)}, "scores": 0.8873},
{"mask": {"hash": "b5f47c9191", "shape": (4_80, 6_40)}, "scores": 0.8871}
] , )
# fmt: on
@require_torch
@slow
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : int = "facebook/sam-vit-huge"
UpperCAmelCase__ : Any = pipeline("mask-generation" , model=__UpperCamelCase )
UpperCAmelCase__ : List[str] = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=2_56 )
# Shortening by hashing
UpperCAmelCase__ : Optional[Any] = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(__UpperCamelCase ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (4_80, 6_40)}, "scores": 1.0444},
{"mask": {"hash": "6affa964c6", "shape": (4_80, 6_40)}, "scores": 1.0210},
{"mask": {"hash": "dfe28a0388", "shape": (4_80, 6_40)}, "scores": 1.0167},
{"mask": {"hash": "c0a5f4a318", "shape": (4_80, 6_40)}, "scores": 1.0132},
{"mask": {"hash": "fe8065c197", "shape": (4_80, 6_40)}, "scores": 1.0053},
] , )
| 660 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
A__ : List[str] = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
A__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 660 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
_A = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_A = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Dict:
UpperCAmelCase__ : Optional[int] = AudioClassificationPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase )
# test with a raw waveform
UpperCAmelCase__ : Dict = np.zeros((3_40_00,) )
UpperCAmelCase__ : Optional[Any] = np.zeros((1_40_00,) )
return audio_classifier, [audioa, audio]
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = examples
UpperCAmelCase__ : List[str] = audio_classifier(__UpperCamelCase )
# by default a model is initialized with num_labels=2
self.assertEqual(
__UpperCamelCase , [
{"score": ANY(__UpperCamelCase ), "label": ANY(__UpperCamelCase )},
{"score": ANY(__UpperCamelCase ), "label": ANY(__UpperCamelCase )},
] , )
UpperCAmelCase__ : List[Any] = audio_classifier(__UpperCamelCase , top_k=1 )
self.assertEqual(
__UpperCamelCase , [
{"score": ANY(__UpperCamelCase ), "label": ANY(__UpperCamelCase )},
] , )
self.run_torchaudio(__UpperCamelCase )
@require_torchaudio
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[Any]:
import datasets
# test with a local file
UpperCAmelCase__ : Optional[Any] = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
UpperCAmelCase__ : Dict = dataset[0]["audio"]["array"]
UpperCAmelCase__ : Optional[Any] = audio_classifier(__UpperCamelCase )
self.assertEqual(
__UpperCamelCase , [
{"score": ANY(__UpperCamelCase ), "label": ANY(__UpperCamelCase )},
{"score": ANY(__UpperCamelCase ), "label": ANY(__UpperCamelCase )},
] , )
@require_torch
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : List[Any] = "anton-l/wav2vec2-random-tiny-classifier"
UpperCAmelCase__ : Union[str, Any] = pipeline("audio-classification" , model=__UpperCamelCase )
UpperCAmelCase__ : Dict = np.ones((80_00,) )
UpperCAmelCase__ : Tuple = audio_classifier(__UpperCamelCase , top_k=4 )
UpperCAmelCase__ : Any = [
{"score": 0.0842, "label": "no"},
{"score": 0.0838, "label": "up"},
{"score": 0.0837, "label": "go"},
{"score": 0.0834, "label": "right"},
]
UpperCAmelCase__ : Dict = [
{"score": 0.0845, "label": "stop"},
{"score": 0.0844, "label": "on"},
{"score": 0.0841, "label": "right"},
{"score": 0.0834, "label": "left"},
]
self.assertIn(nested_simplify(__UpperCamelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
UpperCAmelCase__ : List[str] = {"array": np.ones((80_00,) ), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
UpperCAmelCase__ : Tuple = audio_classifier(__UpperCamelCase , top_k=4 )
self.assertIn(nested_simplify(__UpperCamelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def lowerCAmelCase__ ( self )-> Union[str, Any]:
import datasets
UpperCAmelCase__ : Dict = "superb/wav2vec2-base-superb-ks"
UpperCAmelCase__ : Dict = pipeline("audio-classification" , model=__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = datasets.load_dataset("anton-l/superb_dummy" , "ks" , split="test" )
UpperCAmelCase__ : List[str] = np.array(dataset[3]["speech"] , dtype=np.floataa )
UpperCAmelCase__ : Optional[Any] = audio_classifier(__UpperCamelCase , top_k=4 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=3 ) , [
{"score": 0.981, "label": "go"},
{"score": 0.007, "label": "up"},
{"score": 0.006, "label": "_unknown_"},
{"score": 0.001, "label": "down"},
] , )
@require_tf
@unittest.skip("Audio classification is not implemented for TF" )
def lowerCAmelCase__ ( self )-> Dict:
pass
| 660 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self , __UpperCamelCase = 7_68 , )-> Union[str, Any]:
super().__init__()
UpperCAmelCase__ : str = nn.Parameter(torch.zeros(1 , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[int] = nn.Parameter(torch.ones(1 , __UpperCamelCase ) )
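        # Per-dimension mean and std, registered as parameters, used to whiten the image embeddings (subtract mean, divide by std) and to undo that normalisation.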
def lowerCAmelCase__ ( self , __UpperCamelCase = None , __UpperCamelCase = None , )-> Any:
UpperCAmelCase__ : Dict = nn.Parameter(self.mean.to(__UpperCamelCase ).to(__UpperCamelCase ) )
UpperCAmelCase__ : Any = nn.Parameter(self.std.to(__UpperCamelCase ).to(__UpperCamelCase ) )
return self
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Dict = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[Any]:
UpperCAmelCase__ : Any = (embeds * self.std) + self.mean
return embeds
| 660 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowercase ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = BioGptTokenizer
_A = False
def lowerCAmelCase__ ( self )-> Tuple:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase__ : Dict = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
UpperCAmelCase__ : int = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
UpperCAmelCase__ : Optional[Any] = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
UpperCAmelCase__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(__UpperCamelCase ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(__UpperCamelCase ) )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Dict = "lower newer"
UpperCAmelCase__ : List[Any] = "lower newer"
return input_text, output_text
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : str = BioGptTokenizer(self.vocab_file , self.merges_file )
UpperCAmelCase__ : Any = "lower"
UpperCAmelCase__ : Optional[int] = ["low", "er</w>"]
UpperCAmelCase__ : List[Any] = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = tokens + ["<unk>"]
UpperCAmelCase__ : int = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
@slow
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : str = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
UpperCAmelCase__ : Any = tokenizer.encode("sequence builders" , add_special_tokens=__UpperCamelCase )
UpperCAmelCase__ : List[str] = tokenizer.encode("multi-sequence build" , add_special_tokens=__UpperCamelCase )
UpperCAmelCase__ : int = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase , __UpperCamelCase )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 660 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : List[Any] ):
'''simple docstring'''
# Construct model
if gpta_config_file == "":
UpperCAmelCase__ : Optional[int] = GPTaConfig()
else:
UpperCAmelCase__ : Dict = GPTaConfig.from_json_file(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = GPTaModel(lowerCAmelCase )
# Load weights from numpy
load_tf_weights_in_gpta(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save pytorch-model
UpperCAmelCase__ : Optional[int] = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
UpperCAmelCase__ : Any = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
torch.save(model.state_dict() , lowerCAmelCase )
print(F"Save configuration file to {pytorch_config_dump_path}" )
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
A__ : Optional[Any] = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 660 | 1 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _lowercase ( unittest.TestCase , lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Optional[Any] = load_tool("text-classification" )
self.tool.setup()
UpperCAmelCase__ : List[str] = load_tool("text-classification" , remote=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Dict = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : List[Any] = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Any = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : str = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
| 660 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
A__ : Optional[int] = ["""small""", """medium""", """large"""]
A__ : Optional[int] = """lm_head.decoder.weight"""
A__ : Dict = """lm_head.weight"""
def a__ ( lowerCAmelCase : str , lowerCAmelCase : str ):
'''simple docstring'''
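    # Fine-tuned DialoGPT checkpoints store the LM head weight under "lm_head.decoder.weight"; rename it to "lm_head.weight", the key expected by the transformers GPT-2 model.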
UpperCAmelCase__ : Dict = torch.load(lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = d.pop(lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
torch.save(lowerCAmelCase , os.path.join(lowerCAmelCase , lowerCAmelCase ) )
if __name__ == "__main__":
A__ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
A__ : Tuple = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
A__ : Tuple = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
A__ : str = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 660 | 1 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def a__ ( lowerCAmelCase : str , lowerCAmelCase : float | Decimal , lowerCAmelCase : float = 10**-10 ):
'''simple docstring'''
UpperCAmelCase__ : str = a
while True:
UpperCAmelCase__ : Tuple = Decimal(lowerCAmelCase ) - (
Decimal(eval(lowerCAmelCase ) ) / Decimal(eval(str(diff(lowerCAmelCase ) ) ) ) # noqa: S307
)
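        # Newton's update: x_{n+1} = x_n - f(x_n) / f'(x_n); sympy.diff supplies the derivative and eval evaluates both at the current x.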
# This number dictates the accuracy of the answer
if abs(eval(lowerCAmelCase ) ) < precision: # noqa: S307
return float(lowerCAmelCase )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
print(f"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}""")
    # Find root of log(x) - 1 = 0, i.e. x = e
print(f"""The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}""")
# Exponential Roots
print(f"""The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}""")
| 660 |
"""simple docstring"""
from math import isqrt
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : List[Any] = False
return [i for i in range(2 , lowerCAmelCase ) if is_prime[i]]
def a__ ( lowerCAmelCase : int = 10**8 ):
'''simple docstring'''
UpperCAmelCase__ : Dict = calculate_prime_numbers(max_number // 2 )
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Tuple = len(lowerCAmelCase ) - 1
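    # Two-pointer count: for each prime at "left", shrink "right" until the product falls below max_number; every prime from "left" to "right" then pairs with it to form a distinct semiprime.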
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 | 1 |
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def a__ ( lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = hf_hub_url(repo_id=lowerCAmelCase , path=lowerCAmelCase , revision=lowerCAmelCase )
assert url == F"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(lowerCAmelCase )}"
| 660 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def a__ ( lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] ):
'''simple docstring'''
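    # Pad (or truncate) each ragged sequence to `sequence_length`; the 3-D branch (trailing dimension of 2) handles span-style (start, end) entries such as entity spans.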
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[int] = np.full((len(lowerCAmelCase ), sequence_length, 2) , lowerCAmelCase )
else:
UpperCAmelCase__ : Optional[Any] = np.full((len(lowerCAmelCase ), sequence_length) , lowerCAmelCase )
for i, tensor in enumerate(lowerCAmelCase ):
if padding_side == "right":
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Dict = tensor[:sequence_length]
else:
UpperCAmelCase__ : Tuple = tensor[:sequence_length]
else:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[Any] = tensor[:sequence_length]
else:
UpperCAmelCase__ : int = tensor[:sequence_length]
return out_tensor.tolist()
def a__ ( lowerCAmelCase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = ord(lowerCAmelCase )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
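    # These four code-point ranges cover all ASCII punctuation (33-47, 58-64, 91-96, 123-126); anything else falls back to the Unicode "P" category check below.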
return True
UpperCAmelCase__ : Optional[Any] = unicodedata.category(lowerCAmelCase )
if cat.startswith("P" ):
return True
return False
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = True
_A = None
_A = None
_A = -100
_A = "pt"
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
import torch
UpperCAmelCase__ : Optional[Any] = "label" if "label" in features[0].keys() else "labels"
UpperCAmelCase__ : Dict = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
UpperCAmelCase__ : str = self.tokenizer.pad(
__UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
if labels is None:
return batch
UpperCAmelCase__ : Optional[Any] = torch.tensor(batch["entity_ids"] ).shape[1]
UpperCAmelCase__ : int = self.tokenizer.padding_side
if padding_side == "right":
UpperCAmelCase__ : int = [
list(__UpperCamelCase ) + [self.label_pad_token_id] * (sequence_length - len(__UpperCamelCase )) for label in labels
]
else:
UpperCAmelCase__ : List[Any] = [
[self.label_pad_token_id] * (sequence_length - len(__UpperCamelCase )) + list(__UpperCamelCase ) for label in labels
]
UpperCAmelCase__ : Optional[Any] = [feature["ner_tags"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , -1 , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = [feature["original_entity_spans"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , (-1, -1) , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = {k: torch.tensor(__UpperCamelCase , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 660 | 1 |
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A__ : Tuple = logging.get_logger(__name__)
A__ : Optional[int] = """▁"""
A__ : Any = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""}
A__ : Dict = {
"""sentencepiece_model_file""": """sentencepiece.bpe.model""",
"""vocab_file""": """vocab.txt""",
}
A__ : Union[str, Any] = {
"""vocab_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
},
"""sentencepiece_model_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
},
}
A__ : int = {
"""ernie-m-base""": 514,
"""ernie-m-large""": 514,
}
A__ : List[Any] = {
"""ernie-m-base""": {"""do_lower_case""": False},
"""ernie-m-large""": {"""do_lower_case""": False},
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ["input_ids"]
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_INIT_CONFIGURATION
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = RESOURCE_FILES_NAMES
def __init__( self , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=False , __UpperCamelCase="utf8" , __UpperCamelCase="[UNK]" , __UpperCamelCase="[SEP]" , __UpperCamelCase="[PAD]" , __UpperCamelCase="[CLS]" , __UpperCamelCase="[MASK]" , __UpperCamelCase = None , **__UpperCamelCase , )-> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text, so there should be a match in a non-normalized sentence.
UpperCAmelCase__ : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , pad_token=__UpperCamelCase , cls_token=__UpperCamelCase , mask_token=__UpperCamelCase , vocab_file=__UpperCamelCase , encoding=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
UpperCAmelCase__ : Dict = do_lower_case
UpperCAmelCase__ : Dict = sentencepiece_model_ckpt
UpperCAmelCase__ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCamelCase )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
UpperCAmelCase__ : List[str] = self.load_vocab(filepath=__UpperCamelCase )
else:
UpperCAmelCase__ : str = {self.sp_model.id_to_piece(__UpperCamelCase ): id for id in range(self.sp_model.get_piece_size() )}
UpperCAmelCase__ : Tuple = {v: k for k, v in self.vocab.items()}
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
if text is None:
return None
UpperCAmelCase__ : Dict = self.tokenize(__UpperCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = "", []
for i, ch in enumerate(__UpperCamelCase ):
if ch in self.SP_CHAR_MAPPING:
UpperCAmelCase__ : Any = self.SP_CHAR_MAPPING.get(__UpperCamelCase )
else:
UpperCAmelCase__ : Union[str, Any] = unicodedata.normalize("NFKC" , __UpperCamelCase )
if self.is_whitespace(__UpperCamelCase ):
continue
normalized_text += ch
char_mapping.extend([i] * len(__UpperCamelCase ) )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Tuple = normalized_text, [], 0
if self.do_lower_case:
UpperCAmelCase__ : Optional[Any] = text.lower()
for token in split_tokens:
if token[:1] == "▁":
UpperCAmelCase__ : Union[str, Any] = token[1:]
UpperCAmelCase__ : Optional[int] = text[offset:].index(__UpperCamelCase ) + offset
UpperCAmelCase__ : Optional[Any] = start + len(__UpperCamelCase )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
UpperCAmelCase__ : List[str] = end
return token_mapping
@property
def lowerCAmelCase__ ( self )-> int:
return len(self.vocab )
def lowerCAmelCase__ ( self )-> Optional[int]:
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self )-> Dict:
UpperCAmelCase__ : Any = self.__dict__.copy()
UpperCAmelCase__ : List[Any] = None
return state
def __setstate__( self , __UpperCamelCase )-> Optional[Any]:
UpperCAmelCase__ : List[str] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase__ : int = {}
UpperCAmelCase__ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
return "".join((self.SP_CHAR_MAPPING.get(__UpperCamelCase , __UpperCamelCase ) for c in text) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=64 , __UpperCamelCase=0.1 )-> str:
if self.sp_model_kwargs.get("enable_sampling" ) is True:
UpperCAmelCase__ : Union[str, Any] = True
if self.sp_model_kwargs.get("alpha" ) is not None:
UpperCAmelCase__ : Any = self.sp_model_kwargs.get("alpha" )
if self.sp_model_kwargs.get("nbest_size" ) is not None:
UpperCAmelCase__ : Tuple = self.sp_model_kwargs.get("nbest_size" )
if not enable_sampling:
UpperCAmelCase__ : List[str] = self.sp_model.EncodeAsPieces(__UpperCamelCase )
else:
UpperCAmelCase__ : Tuple = self.sp_model.SampleEncodeAsPieces(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : List[str] = []
for pi, piece in enumerate(__UpperCamelCase ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(__UpperCamelCase ) and pi != 0:
new_pieces.append(__UpperCamelCase )
continue
else:
continue
UpperCAmelCase__ : Dict = 0
for i, chunk in enumerate(__UpperCamelCase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(__UpperCamelCase ) or self.is_punct(__UpperCamelCase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(__UpperCamelCase )
UpperCAmelCase__ : Any = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase__ : Union[str, Any] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase__ : List[Any] = i
if len(__UpperCamelCase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Tuple:
UpperCAmelCase__ : str = "".join(__UpperCamelCase ).replace(__UpperCamelCase , " " ).strip()
return out_string
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
UpperCAmelCase__ : Optional[int] = self.convert_ids_to_tokens(__UpperCamelCase )
UpperCAmelCase__ : str = "".join(__UpperCamelCase ).replace(__UpperCamelCase , " " ).strip()
return out_string
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Any:
return self.vocab.get(__UpperCamelCase , self.vocab.get(self.unk_token ) )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
return self.reverse_vocab.get(__UpperCamelCase , self.unk_token )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=None )-> Tuple:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase__ : Optional[int] = [self.cls_token_id]
UpperCAmelCase__ : Any = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=None )-> int:
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=False )-> str:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase )) + [1]
return [1] + ([0] * len(__UpperCamelCase )) + [1]
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None )-> List[int]:
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(__UpperCamelCase ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(__UpperCamelCase ) + 1) + [1] * (len(__UpperCamelCase ) + 3)
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
if "\u4e00" <= char <= "\u9fff":
return True
return False
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(__UpperCamelCase ) == 1:
UpperCAmelCase__ : Tuple = unicodedata.category(__UpperCamelCase )
if cat == "Zs":
return True
return False
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Dict:
UpperCAmelCase__ : int = {}
with io.open(__UpperCamelCase , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(__UpperCamelCase ):
UpperCAmelCase__ : Any = line.rstrip("\n" )
UpperCAmelCase__ : Union[str, Any] = int(__UpperCamelCase )
return token_to_idx
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None )-> Tuple[str]:
UpperCAmelCase__ : Dict = 0
if os.path.isdir(__UpperCamelCase ):
UpperCAmelCase__ : Optional[int] = os.path.join(
__UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
UpperCAmelCase__ : Tuple = (filename_prefix + "-" if filename_prefix else "") + save_directory
with open(__UpperCamelCase , "w" , encoding="utf-8" ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!" )
UpperCAmelCase__ : List[str] = token_index
writer.write(token + "\n" )
index += 1
UpperCAmelCase__ : Any = os.path.join(__UpperCamelCase , "sentencepiece.bpe.model" )
with open(__UpperCamelCase , "wb" ) as fi:
UpperCAmelCase__ : Dict = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (vocab_file,)
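# Illustrative sketch (not part of the original file): the offset-mapping method above first
# NFKC-normalizes the raw text while recording, for every character it keeps, the raw-text
# index it came from, and then searches for each sentencepiece token inside that normalized
# string. A simplified standalone version of the normalization step (the real method also
# applies SP_CHAR_MAPPING and a broader whitespace check):
def _normalize_with_char_mapping(text):
    normalized, char_mapping = "", []
    for i, ch in enumerate(text):
        ch = unicodedata.normalize("NFKC", ch)
        if ch.isspace():
            continue
        normalized += ch
        char_mapping.extend([i] * len(ch))
    return normalized, char_mapping
# e.g. _normalize_with_char_mapping("a b") -> ("ab", [0, 2])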
| 660 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def a__ ( lowerCAmelCase : List[str] ):
'''simple docstring'''
def wrapper(*lowerCAmelCase : Any , **lowerCAmelCase : Tuple ):
UpperCAmelCase__ : Optional[int] = timeit.default_timer()
UpperCAmelCase__ : int = func(*lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase__ : List[Any] = timeit.default_timer() - starttime
return delta
UpperCAmelCase__ : int = func.__name__
return wrapper
def a__ ( lowerCAmelCase : dict , lowerCAmelCase : Optional[int]=100 , lowerCAmelCase : List[str]=None ):
'''simple docstring'''
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Optional[Any] = seq_shapes or {}
for i in range(lowerCAmelCase ):
UpperCAmelCase__ : int = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowerCAmelCase , _ArrayXD ):
UpperCAmelCase__ : List[str] = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowerCAmelCase , datasets.Value ):
if v.dtype == "string":
UpperCAmelCase__ : Dict = "The small grey turtle was surprisingly fast when challenged."
else:
UpperCAmelCase__ : str = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(lowerCAmelCase , datasets.Sequence ):
while isinstance(lowerCAmelCase , datasets.Sequence ):
UpperCAmelCase__ : List[str] = v.feature
UpperCAmelCase__ : Optional[int] = seq_shapes[k]
UpperCAmelCase__ : Optional[int] = np.random.rand(*lowerCAmelCase ).astype(v.dtype )
UpperCAmelCase__ : Union[str, Any] = data
dummy_data.append((i, example) )
return dummy_data
def a__ ( lowerCAmelCase : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : List[str]=100 , lowerCAmelCase : Optional[int]=None ):
'''simple docstring'''
UpperCAmelCase__ : int = generate_examples(lowerCAmelCase , num_examples=lowerCAmelCase , seq_shapes=lowerCAmelCase )
with ArrowWriter(features=lowerCAmelCase , path=lowerCAmelCase ) as writer:
for key, record in dummy_data:
UpperCAmelCase__ : List[Any] = features.encode_example(lowerCAmelCase )
writer.write(lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
F"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
UpperCAmelCase__ : Optional[int] = datasets.Dataset.from_file(filename=lowerCAmelCase , info=datasets.DatasetInfo(features=lowerCAmelCase ) )
return dataset
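# Standalone usage sketch (not part of the original benchmark; the path and feature names
# are made up): the same write-then-load round trip performed above, spelled out for a
# single hand-written example.
def _demo_round_trip(path="/tmp/demo.arrow"):
    feats = datasets.Features({"text": datasets.Value("string")})
    with ArrowWriter(features=feats, path=path) as writer:
        writer.write(feats.encode_example({"text": "hello"}))
        writer.finalize()
    return datasets.Dataset.from_file(filename=path, info=datasets.DatasetInfo(features=feats))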
| 660 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : Optional[int] = logging.get_logger(__name__)
A__ : Dict = {
"""kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""",
"""kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""",
"""kssteven/ibert-roberta-large-mnli""": (
"""https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"""
),
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'ibert'
def __init__( self , __UpperCamelCase=3_05_22 , __UpperCamelCase=7_68 , __UpperCamelCase=12 , __UpperCamelCase=12 , __UpperCamelCase=30_72 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=5_12 , __UpperCamelCase=2 , __UpperCamelCase=0.02 , __UpperCamelCase=1E-12 , __UpperCamelCase=1 , __UpperCamelCase=0 , __UpperCamelCase=2 , __UpperCamelCase="absolute" , __UpperCamelCase=False , __UpperCamelCase="none" , **__UpperCamelCase , )-> Optional[int]:
super().__init__(pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase )
UpperCAmelCase__ : List[str] = vocab_size
UpperCAmelCase__ : Union[str, Any] = hidden_size
UpperCAmelCase__ : str = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : Union[str, Any] = hidden_act
UpperCAmelCase__ : Dict = intermediate_size
UpperCAmelCase__ : Tuple = hidden_dropout_prob
UpperCAmelCase__ : Dict = attention_probs_dropout_prob
UpperCAmelCase__ : Any = max_position_embeddings
UpperCAmelCase__ : Optional[int] = type_vocab_size
UpperCAmelCase__ : List[str] = initializer_range
UpperCAmelCase__ : List[str] = layer_norm_eps
UpperCAmelCase__ : List[Any] = position_embedding_type
UpperCAmelCase__ : int = quant_mode
UpperCAmelCase__ : List[Any] = force_dequant
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self )-> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCAmelCase__ : List[str] = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase__ : Optional[int] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
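# Usage sketch with the corresponding upstream Hugging Face names (the names below are an
# assumption about the public `transformers` API and are not defined in this file):
#   from transformers import IBertConfig
#   config = IBertConfig(quant_mode=True, force_dequant="gelu")
#   config.hidden_size, config.num_hidden_layers   # -> (768, 12) with the defaults above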
| 660 |
"""simple docstring"""
from manim import *
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : str = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase__ : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Union[str, Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[Any] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : int = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = VGroup(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("CPU" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(4 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Union[str, Any] = Text("GPU" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Model" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
rect.set_stroke(__UpperCamelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCAmelCase__ : int = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__UpperCamelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__UpperCamelCase , buff=0.0 )
self.add(__UpperCamelCase )
cpu_targs.append(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Any = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Loaded Checkpoint" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , aligned_edge=__UpperCamelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase__ : Any = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : str = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__UpperCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCAmelCase__ : Optional[Any] = MarkupText(
F"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCamelCase ) , Write(__UpperCamelCase ) )
self.play(Write(__UpperCamelCase , run_time=1 ) , Create(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = fill.copy().set_fill(__UpperCamelCase , opacity=0.7 )
target.move_to(__UpperCamelCase )
first_animations.append(GrowFromCenter(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : List[str] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__UpperCamelCase , run_time=1.5 ) )
self.play(*__UpperCamelCase )
self.play(*__UpperCamelCase )
self.wait()
| 660 | 1 |
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
A__ : Dict = 0
A__ : int = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
A__ : str = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
A__ : Any = tuple[int, int]
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> None:
UpperCAmelCase__ : Tuple = pos_x
UpperCAmelCase__ : List[str] = pos_y
UpperCAmelCase__ : List[Any] = (pos_y, pos_x)
UpperCAmelCase__ : List[Any] = goal_x
UpperCAmelCase__ : Any = goal_y
UpperCAmelCase__ : str = g_cost
UpperCAmelCase__ : int = parent
UpperCAmelCase__ : List[str] = self.calculate_heuristic()
UpperCAmelCase__ : Optional[Any] = self.g_cost + self.h_cost
def lowerCAmelCase__ ( self )-> float:
UpperCAmelCase__ : Union[str, Any] = self.pos_x - self.goal_x
UpperCAmelCase__ : str = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(__UpperCamelCase ) + abs(__UpperCamelCase )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , __UpperCamelCase )-> bool:
return self.f_cost < other.f_cost
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __UpperCamelCase )
UpperCAmelCase__ : List[str] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , __UpperCamelCase )
UpperCAmelCase__ : int = [self.start]
UpperCAmelCase__ : list[Node] = []
UpperCAmelCase__ : Dict = False
def lowerCAmelCase__ ( self )-> list[TPosition]:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
UpperCAmelCase__ : List[Any] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(__UpperCamelCase )
self.closed_nodes.append(__UpperCamelCase )
UpperCAmelCase__ : str = self.get_successors(__UpperCamelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(__UpperCamelCase )
else:
# retrieve the best current path
UpperCAmelCase__ : Any = self.open_nodes.pop(self.open_nodes.index(__UpperCamelCase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(__UpperCamelCase )
else:
self.open_nodes.append(__UpperCamelCase )
return [self.start.pos]
def lowerCAmelCase__ ( self , __UpperCamelCase )-> list[Node]:
UpperCAmelCase__ : str = []
for action in delta:
UpperCAmelCase__ : Dict = parent.pos_x + action[1]
UpperCAmelCase__ : Optional[int] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__UpperCamelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
__UpperCamelCase , __UpperCamelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __UpperCamelCase , ) )
return successors
def lowerCAmelCase__ ( self , __UpperCamelCase )-> list[TPosition]:
UpperCAmelCase__ : Optional[int] = node
UpperCAmelCase__ : List[Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
UpperCAmelCase__ : Tuple = current_node.parent
path.reverse()
return path
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase )-> None:
UpperCAmelCase__ : Any = AStar(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = AStar(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : int = False
def lowerCAmelCase__ ( self )-> list[TPosition]:
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
UpperCAmelCase__ : int = self.fwd_astar.open_nodes.pop(0 )
UpperCAmelCase__ : Dict = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
__UpperCamelCase , __UpperCamelCase )
self.fwd_astar.closed_nodes.append(__UpperCamelCase )
self.bwd_astar.closed_nodes.append(__UpperCamelCase )
UpperCAmelCase__ : List[str] = current_bwd_node
UpperCAmelCase__ : Any = current_fwd_node
UpperCAmelCase__ : List[str] = {
self.fwd_astar: self.fwd_astar.get_successors(__UpperCamelCase ),
self.bwd_astar: self.bwd_astar.get_successors(__UpperCamelCase ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(__UpperCamelCase )
else:
# retrieve the best current path
UpperCAmelCase__ : Any = astar.open_nodes.pop(
astar.open_nodes.index(__UpperCamelCase ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(__UpperCamelCase )
else:
astar.open_nodes.append(__UpperCamelCase )
return [self.fwd_astar.start.pos]
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> list[TPosition]:
UpperCAmelCase__ : Tuple = self.fwd_astar.retrace_path(__UpperCamelCase )
UpperCAmelCase__ : Tuple = self.bwd_astar.retrace_path(__UpperCamelCase )
bwd_path.pop()
bwd_path.reverse()
UpperCAmelCase__ : Any = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
A__ : Any = (0, 0)
A__ : Tuple = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
A__ : Any = time.time()
A__ : int = AStar(init, goal)
A__ : List[Any] = a_star.search()
A__ : Optional[int] = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
A__ : List[Any] = time.time()
A__ : int = BidirectionalAStar(init, goal)
A__ : Any = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 660 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
A__ : Tuple = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : nn.ModuleList , lowerCAmelCase : nn.ModuleList , lowerCAmelCase : List[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(lowerCAmelCase ) == len(lowerCAmelCase ), F"{len(lowerCAmelCase )} != {len(lowerCAmelCase )}"
dest_layers.load_state_dict(layers_to_copy.state_dict() )
A__ : List[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
A__ : List[Any] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def a__ ( lowerCAmelCase : Dict , lowerCAmelCase : Dict ):
'''simple docstring'''
try:
UpperCAmelCase__ : Tuple = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
F" {n_student}" )
return list(range(lowerCAmelCase ) )
def a__ ( lowerCAmelCase : int , lowerCAmelCase : Tuple ):
'''simple docstring'''
if n_student > n_teacher:
raise ValueError(F"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}" )
elif n_teacher == n_student:
return list(range(lowerCAmelCase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def a__ ( lowerCAmelCase : Union[str, PreTrainedModel] , lowerCAmelCase : Union[str, Path] = "student" , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : List[str]=False , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : List[str] , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCAmelCase , lowerCAmelCase ):
AutoTokenizer.from_pretrained(lowerCAmelCase ).save_pretrained(lowerCAmelCase ) # purely for convenience
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).eval()
else:
assert isinstance(lowerCAmelCase , lowerCAmelCase ), F"teacher must be a model or string got type {type(lowerCAmelCase )}"
UpperCAmelCase__ : int = teacher.config.to_diff_dict()
try:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
UpperCAmelCase__ : Tuple = teacher_e
if d is None:
UpperCAmelCase__ : str = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
UpperCAmelCase__ : Optional[Any] = teacher_e
if d is None:
UpperCAmelCase__ : Optional[Any] = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCAmelCase )
# Copy weights
UpperCAmelCase__ : Tuple = teacher.config_class(**lowerCAmelCase )
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_config(lowerCAmelCase )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
UpperCAmelCase__ : Optional[int] = student.load_state_dict(teacher.state_dict() , strict=lowerCAmelCase )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
UpperCAmelCase__ , UpperCAmelCase__ : int = list(range(lowerCAmelCase ) ), list(range(lowerCAmelCase ) )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
F" {save_path}" )
student.save_pretrained(lowerCAmelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
if d_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
try:
if hasattr(
lowerCAmelCase , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCAmelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCAmelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCAmelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCAmelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCAmelCase )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCAmelCase )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
UpperCAmelCase__ : int = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(lowerCAmelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
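# Worked example of the copy/supervision tables above (values read directly from the dicts):
#   teacher with 12 layers -> student with 3 layers:
#       layers copied     : [0, 6, 11]   (first, middle and last teacher layers)
#       layers supervised : [3, 7, 11]
#   teacher with 16 layers -> student with 4 layers:
#       layers copied     : [0, 5, 10, 15]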
| 660 | 1 |
"""simple docstring"""
import argparse
import copy
def a__ ( lowerCAmelCase : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = {}
with open(lowerCAmelCase ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
UpperCAmelCase__ : List[Any] = []
_list.append([line.split()[1], line.split()[2]] )
UpperCAmelCase__ : List[Any] = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
UpperCAmelCase__ : Union[str, Any] = []
_list.append([line.split()[0], line.split()[2]] )
UpperCAmelCase__ : str = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def a__ ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
with open(lowerCAmelCase ) as f:
UpperCAmelCase__ : Optional[Any] = f.read(1 )
UpperCAmelCase__ : Any = start_node
UpperCAmelCase__ : List[str] = []
UpperCAmelCase__ : Any = start_node
UpperCAmelCase__ : Optional[Any] = 0
while visiting not in first_solution:
UpperCAmelCase__ : Union[str, Any] = 1_0000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(lowerCAmelCase ) and k[0] not in first_solution:
UpperCAmelCase__ : int = k[1]
UpperCAmelCase__ : List[Any] = k[0]
first_solution.append(lowerCAmelCase )
UpperCAmelCase__ : Dict = distance_of_first_solution + int(lowerCAmelCase )
UpperCAmelCase__ : Tuple = best_node
first_solution.append(lowerCAmelCase )
UpperCAmelCase__ : List[Any] = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
UpperCAmelCase__ : Union[str, Any] = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0000
)
return first_solution, distance_of_first_solution
def a__ ( lowerCAmelCase : int , lowerCAmelCase : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = []
for n in solution[1:-1]:
UpperCAmelCase__ : Any = solution.index(lowerCAmelCase )
for kn in solution[1:-1]:
UpperCAmelCase__ : List[Any] = solution.index(lowerCAmelCase )
if n == kn:
continue
UpperCAmelCase__ : Optional[int] = copy.deepcopy(lowerCAmelCase )
UpperCAmelCase__ : Dict = kn
UpperCAmelCase__ : Tuple = n
UpperCAmelCase__ : int = 0
for k in _tmp[:-1]:
UpperCAmelCase__ : List[str] = _tmp[_tmp.index(lowerCAmelCase ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
UpperCAmelCase__ : List[str] = distance + int(i[1] )
_tmp.append(lowerCAmelCase )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
UpperCAmelCase__ : Dict = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def a__ ( lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = 1
UpperCAmelCase__ : Tuple = first_solution
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : Union[str, Any] = distance_of_first_solution
UpperCAmelCase__ : Any = solution
while count <= iters:
UpperCAmelCase__ : Union[str, Any] = find_neighborhood(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : List[Any] = 0
UpperCAmelCase__ : Tuple = neighborhood[index_of_best_solution]
UpperCAmelCase__ : Optional[int] = len(lowerCAmelCase ) - 1
UpperCAmelCase__ : Tuple = False
while not found:
UpperCAmelCase__ : Optional[Any] = 0
while i < len(lowerCAmelCase ):
if best_solution[i] != solution[i]:
UpperCAmelCase__ : Optional[Any] = best_solution[i]
UpperCAmelCase__ : Dict = solution[i]
break
UpperCAmelCase__ : List[str] = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : Union[str, Any] = best_solution[:-1]
UpperCAmelCase__ : Optional[Any] = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
UpperCAmelCase__ : List[Any] = cost
UpperCAmelCase__ : Optional[int] = solution
else:
UpperCAmelCase__ : List[str] = index_of_best_solution + 1
UpperCAmelCase__ : str = neighborhood[index_of_best_solution]
if len(lowerCAmelCase ) >= size:
tabu_list.pop(0 )
UpperCAmelCase__ : List[str] = count + 1
return best_solution_ever, best_cost
def a__ ( lowerCAmelCase : str=None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = generate_neighbours(args.File )
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = generate_first_solution(
args.File , lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = tabu_search(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , args.Iterations , args.Size , )
print(F"Best solution: {best_sol}, with total distance: {best_cost}." )
if __name__ == "__main__":
A__ : int = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
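# Input-format sketch, inferred from the parsing code above (no sample file ships with this
# script): one edge per line as "<node> <node> <distance>", with the first character of the
# file doubling as the start node, e.g.
#   a b 20
#   a c 18
#   b c 10
# Hypothetical invocation:
#   python tabu_search.py -f edges.txt -i 100 -s 5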
| 660 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowercase ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self )-> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Tuple = ort.SessionOptions()
UpperCAmelCase__ : List[str] = False
return options
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : str = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : Tuple = np.random.RandomState(0 )
UpperCAmelCase__ : Any = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : Tuple = output.images
UpperCAmelCase__ : Dict = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : Union[str, Any] = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
UpperCAmelCase__ : Optional[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : List[str] = np.random.RandomState(0 )
UpperCAmelCase__ : str = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : List[str] = output.images
UpperCAmelCase__ : List[Any] = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : int = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 660 | 1 |
"""simple docstring"""
import qiskit
def a__ ( lowerCAmelCase : int , lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = qiskit.Aer.get_backend("aer_simulator" )
# Create a Quantum Circuit acting on the q register
UpperCAmelCase__ : List[str] = qiskit.QuantumCircuit(lowerCAmelCase , lowerCAmelCase )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
UpperCAmelCase__ : Optional[int] = qiskit.execute(lowerCAmelCase , lowerCAmelCase , shots=1000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowerCAmelCase )
if __name__ == "__main__":
print(f"""Total count for various states are: {single_qubit_measure(1, 1)}""")
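# Note: no gate is applied before the measurement above, so the qubit stays in |0> and the
# expected histogram is {'0': 1000} for the 1000 shots.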
| 660 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : Optional[int] = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'table-transformer'
_A = ['past_key_values']
_A = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=3 , __UpperCamelCase=1_00 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=True , __UpperCamelCase="relu" , __UpperCamelCase=2_56 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.02 , __UpperCamelCase=1.0 , __UpperCamelCase=False , __UpperCamelCase="sine" , __UpperCamelCase="resnet50" , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=0.1 , **__UpperCamelCase , )-> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase__ : Any = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : int = backbone_config.get("model_type" )
UpperCAmelCase__ : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase__ : int = config_class.from_dict(__UpperCamelCase )
# set timm attributes to None
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = None, None, None
UpperCAmelCase__ : Optional[int] = use_timm_backbone
UpperCAmelCase__ : Dict = backbone_config
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Any = num_queries
UpperCAmelCase__ : int = d_model
UpperCAmelCase__ : Optional[int] = encoder_ffn_dim
UpperCAmelCase__ : str = encoder_layers
UpperCAmelCase__ : Dict = encoder_attention_heads
UpperCAmelCase__ : Optional[Any] = decoder_ffn_dim
UpperCAmelCase__ : Tuple = decoder_layers
UpperCAmelCase__ : Optional[Any] = decoder_attention_heads
UpperCAmelCase__ : List[str] = dropout
UpperCAmelCase__ : Tuple = attention_dropout
UpperCAmelCase__ : List[Any] = activation_dropout
UpperCAmelCase__ : Dict = activation_function
UpperCAmelCase__ : Optional[Any] = init_std
UpperCAmelCase__ : List[str] = init_xavier_std
UpperCAmelCase__ : int = encoder_layerdrop
UpperCAmelCase__ : Tuple = decoder_layerdrop
UpperCAmelCase__ : int = encoder_layers
UpperCAmelCase__ : Dict = auxiliary_loss
UpperCAmelCase__ : Union[str, Any] = position_embedding_type
UpperCAmelCase__ : List[str] = backbone
UpperCAmelCase__ : List[Any] = use_pretrained_backbone
UpperCAmelCase__ : List[str] = dilation
# Hungarian matcher
UpperCAmelCase__ : Dict = class_cost
UpperCAmelCase__ : Any = bbox_cost
UpperCAmelCase__ : Tuple = giou_cost
# Loss coefficients
UpperCAmelCase__ : Any = mask_loss_coefficient
UpperCAmelCase__ : Dict = dice_loss_coefficient
UpperCAmelCase__ : Any = bbox_loss_coefficient
UpperCAmelCase__ : Tuple = giou_loss_coefficient
UpperCAmelCase__ : List[Any] = eos_coefficient
super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase )
@property
def lowerCAmelCase__ ( self )-> int:
return self.encoder_attention_heads
@property
def lowerCAmelCase__ ( self )-> int:
return self.d_model
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = version.parse('1.11' )
@property
def lowerCAmelCase__ ( self )-> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def lowerCAmelCase__ ( self )-> float:
return 1E-5
@property
def lowerCAmelCase__ ( self )-> int:
return 12
| 660 | 1 |
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def a__ ( lowerCAmelCase : Any ):
'''simple docstring'''
monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def a__ ( lowerCAmelCase : Optional[int] ):
'''simple docstring'''
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase )-> Optional[Any]:
UpperCAmelCase__ : Optional[Any] = metric_id
class _lowercase :
'''simple docstring'''
_A = [MetricMock(lowerCAmelCase_ ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]
def lowerCAmelCase__ ( self )-> Optional[Any]:
return self._metrics
monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
"func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def a__ ( lowerCAmelCase : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any] ):
'''simple docstring'''
if "tmp_path" in args:
UpperCAmelCase__ : Tuple = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
with pytest.warns(lowerCAmelCase , match="https://huggingface.co/docs/evaluate" ):
func(*lowerCAmelCase )
| 660 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
A__ : int = getLogger(__name__)
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : int = 8 , lowerCAmelCase : int = 1024 , lowerCAmelCase : List[Any]="val" , lowerCAmelCase : str=None , lowerCAmelCase : int=False , lowerCAmelCase : Dict="summarization" , lowerCAmelCase : int=None , lowerCAmelCase : List[str]=1 , lowerCAmelCase : Dict = None , lowerCAmelCase : List[str]="" , **lowerCAmelCase : int , ):
'''simple docstring'''
UpperCAmelCase__ : Dict = str(lowerCAmelCase )
assert local_rank is not None
torch.distributed.init_process_group(backend="nccl" , rank=lowerCAmelCase )
UpperCAmelCase__ : List[str] = Path(lowerCAmelCase )
UpperCAmelCase__ : str = save_dir.joinpath(F"rank_{local_rank}_output.json" )
torch.cuda.set_device(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).cuda()
if fpaa:
UpperCAmelCase__ : List[Any] = model.half()
# determine if we need to increase num_beams
use_task_specific_params(lowerCAmelCase , lowerCAmelCase ) # update config with task specific params
UpperCAmelCase__ : List[Any] = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
UpperCAmelCase__ : Any = num_return_sequences
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
if max_source_length is None:
UpperCAmelCase__ : int = tokenizer.model_max_length
if prefix is None:
UpperCAmelCase__ : Union[str, Any] = prefix or getattr(model.config , "prefix" , "" ) or ""
UpperCAmelCase__ : str = SeqaSeqDataset(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , max_target_length=1024 , type_path=lowerCAmelCase , n_obs=lowerCAmelCase , prefix=lowerCAmelCase , **lowerCAmelCase , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
UpperCAmelCase__ : Union[str, Any] = ds.make_sortish_sampler(lowerCAmelCase , distributed=lowerCAmelCase , add_extra_examples=lowerCAmelCase , shuffle=lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = DataLoader(lowerCAmelCase , sampler=lowerCAmelCase , batch_size=lowerCAmelCase , collate_fn=ds.collate_fn )
UpperCAmelCase__ : str = []
for batch in tqdm(lowerCAmelCase ):
UpperCAmelCase__ : Dict = model.generate(
input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=lowerCAmelCase , num_beams=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase__ : int = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase )
UpperCAmelCase__ : int = batch["ids"]
if num_return_sequences > 1:
UpperCAmelCase__ : str = chunks(lowerCAmelCase , lowerCAmelCase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(lowerCAmelCase ):
results.append({"pred": pred, "id": ids[i].item()} )
save_json(lowerCAmelCase , lowerCAmelCase )
return results, sampler.num_replicas
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : str = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" , type=lowerCAmelCase , help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" , type=lowerCAmelCase , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
parser.add_argument("--save_dir" , type=lowerCAmelCase , help="where to save" , default="tmp_gen" )
parser.add_argument("--max_source_length" , type=lowerCAmelCase , default=lowerCAmelCase )
parser.add_argument(
"--type_path" , type=lowerCAmelCase , default="test" , help="which subset to evaluate typically train/val/test" )
parser.add_argument("--task" , type=lowerCAmelCase , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=lowerCAmelCase , default=8 , required=lowerCAmelCase , help="batch size" )
parser.add_argument(
"--local_rank" , type=lowerCAmelCase , default=-1 , required=lowerCAmelCase , help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase , help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" , type=lowerCAmelCase , default=1 , required=lowerCAmelCase , help="How many sequences to return" )
parser.add_argument(
"--sync_timeout" , type=lowerCAmelCase , default=600 , required=lowerCAmelCase , help="How long should master process wait for other processes to finish." , )
parser.add_argument("--src_lang" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
parser.add_argument("--tgt_lang" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
parser.add_argument(
"--prefix" , type=lowerCAmelCase , required=lowerCAmelCase , default=lowerCAmelCase , help="will be added to the begininng of src examples" )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--debug" , action="store_true" )
UpperCAmelCase__ : Optional[int] = time.time()
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = parser.parse_known_args()
UpperCAmelCase__ : int = parse_numeric_n_bool_cl_kwargs(lowerCAmelCase )
if generate_kwargs and args.local_rank <= 0:
print(F"parsed the following generate kwargs: {generate_kwargs}" )
UpperCAmelCase__ : Dict = Path(args.save_dir + "_tmp" )
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase ) # this handles locking.
UpperCAmelCase__ : List[str] = list(json_save_dir.glob("rank_*.json" ) )
if intermediate_files:
raise ValueError(F"Found files at {json_save_dir} please move or remove them." )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
UpperCAmelCase__ : List[str] = {}
if args.src_lang is not None:
UpperCAmelCase__ : str = args.src_lang
if args.tgt_lang is not None:
UpperCAmelCase__ : List[str] = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = eval_data_dir(
args.data_dir , lowerCAmelCase , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=lowerCAmelCase , **lowerCAmelCase , )
if args.local_rank <= 0:
UpperCAmelCase__ : str = Path(args.save_dir )
save_dir.mkdir(exist_ok=lowerCAmelCase )
UpperCAmelCase__ : Tuple = gather_results_from_each_node(lowerCAmelCase , lowerCAmelCase , args.sync_timeout )
UpperCAmelCase__ : Union[str, Any] = combine_partial_results(lowerCAmelCase )
if args.num_return_sequences > 1:
UpperCAmelCase__ : int = save_dir.joinpath("pseudolabel_results.json" )
print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
save_json(lowerCAmelCase , lowerCAmelCase )
return
UpperCAmelCase__ : Optional[Any] = Path(args.data_dir ).joinpath(args.type_path + ".target" )
with open(lowerCAmelCase ) as f:
UpperCAmelCase__ : Optional[int] = [x.rstrip() for x in f.readlines()][: len(lowerCAmelCase )]
# Calculate metrics, save metrics, and save _generations.txt
UpperCAmelCase__ : List[Any] = "translation" in args.task
UpperCAmelCase__ : Optional[Any] = calculate_bleu if calc_bleu else calculate_rouge
UpperCAmelCase__ : Optional[Any] = "bleu" if calc_bleu else "rouge"
UpperCAmelCase__ : Dict = score_fn(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : List[Any] = len(lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = time.time() - start_time
UpperCAmelCase__ : Optional[int] = round(runtime / metrics["n_obs"] , 4 )
UpperCAmelCase__ : Tuple = num_replicas
# TODO(@stas00): add whatever metadata to metrics
UpperCAmelCase__ : Any = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" )
save_json(lowerCAmelCase , lowerCAmelCase , indent=lowerCAmelCase )
print(lowerCAmelCase )
write_txt_file(lowerCAmelCase , save_dir.joinpath(F"{args.type_path}_generations.txt" ) )
if args.debug:
write_txt_file(lowerCAmelCase , save_dir.joinpath(F"{args.type_path}.target" ) )
else:
shutil.rmtree(lowerCAmelCase )
def a__ ( lowerCAmelCase : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : str = []
for partial_result in partial_results:
records.extend(lowerCAmelCase )
    UpperCAmelCase__ : Dict = sorted(lowerCAmelCase , key=lambda x : x["id"] )
UpperCAmelCase__ : List[str] = [x["pred"] for x in records]
return preds
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
'''simple docstring'''
# WAIT FOR lots of .json files
UpperCAmelCase__ : int = time.time()
logger.info("waiting for all nodes to finish" )
UpperCAmelCase__ : Dict = None
while (time.time() - start_wait) < timeout:
UpperCAmelCase__ : str = list(save_dir.glob("rank_*.json" ) )
if len(lowerCAmelCase ) < num_replicas:
continue
try:
# make sure all json files are fully saved
UpperCAmelCase__ : Union[str, Any] = lmap(lowerCAmelCase , lowerCAmelCase )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
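# Invocation sketch (hypothetical paths; --local_rank is supplied by the launcher, as the
# argparse help above notes):
#   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 --data_dir cnn_dm --save_dir tmp_gen --bs 8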
| 660 | 1 |
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=0.2 , __UpperCamelCase=0.2 )-> str:
UpperCAmelCase__ : List[str] = bp_numa
UpperCAmelCase__ : str = bp_numa
UpperCAmelCase__ : List[Any] = bp_numa
UpperCAmelCase__ : Union[str, Any] = conva_get[:2]
UpperCAmelCase__ : Dict = conva_get[2]
UpperCAmelCase__ : List[Any] = size_pa
UpperCAmelCase__ : Optional[int] = rate_w
UpperCAmelCase__ : Any = rate_t
UpperCAmelCase__ : List[str] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
UpperCAmelCase__ : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
UpperCAmelCase__ : Optional[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
UpperCAmelCase__ : Dict = -2 * np.random.rand(self.conva[1] ) + 1
UpperCAmelCase__ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
UpperCAmelCase__ : Optional[Any] = -2 * np.random.rand(self.num_bpa ) + 1
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
# save model dict with pickle
UpperCAmelCase__ : Union[str, Any] = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(__UpperCamelCase , "wb" ) as f:
pickle.dump(__UpperCamelCase , __UpperCamelCase )
print(F"Model saved: {save_path}" )
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase )-> Dict:
# read saved model
with open(__UpperCamelCase , "rb" ) as f:
UpperCAmelCase__ : Union[str, Any] = pickle.load(__UpperCamelCase ) # noqa: S301
UpperCAmelCase__ : Optional[Any] = model_dic.get("conv1" )
conv_get.append(model_dic.get("step_conv1" ) )
UpperCAmelCase__ : str = model_dic.get("size_pooling1" )
UpperCAmelCase__ : List[str] = model_dic.get("num_bp1" )
UpperCAmelCase__ : Union[str, Any] = model_dic.get("num_bp2" )
UpperCAmelCase__ : Dict = model_dic.get("num_bp3" )
UpperCAmelCase__ : int = model_dic.get("rate_weight" )
UpperCAmelCase__ : str = model_dic.get("rate_thre" )
# create model instance
UpperCAmelCase__ : int = CNN(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# modify model parameter
UpperCAmelCase__ : List[Any] = model_dic.get("w_conv1" )
UpperCAmelCase__ : int = model_dic.get("wkj" )
UpperCAmelCase__ : Any = model_dic.get("vji" )
UpperCAmelCase__ : int = model_dic.get("thre_conv1" )
UpperCAmelCase__ : Optional[int] = model_dic.get("thre_bp2" )
UpperCAmelCase__ : Dict = model_dic.get("thre_bp3" )
return conv_ins
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Dict:
return 1 / (1 + np.exp(-1 * x ))
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Tuple:
return round(__UpperCamelCase , 3 )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Tuple:
# convolution process
UpperCAmelCase__ : Optional[int] = convs[0]
UpperCAmelCase__ : Union[str, Any] = convs[1]
UpperCAmelCase__ : Tuple = np.shape(__UpperCamelCase )[0]
# get the data slice of original image data, data_focus
UpperCAmelCase__ : Optional[int] = []
for i_focus in range(0 , size_data - size_conv + 1 , __UpperCamelCase ):
for j_focus in range(0 , size_data - size_conv + 1 , __UpperCamelCase ):
UpperCAmelCase__ : int = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__UpperCamelCase )
        # calculate the feature map of every single kernel, and save each one as a matrix in a list
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : Union[str, Any] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(__UpperCamelCase ):
UpperCAmelCase__ : str = []
for i_focus in range(len(__UpperCamelCase ) ):
UpperCAmelCase__ : Dict = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(__UpperCamelCase ) )
UpperCAmelCase__ : int = np.asmatrix(__UpperCamelCase ).reshape(
__UpperCamelCase , __UpperCamelCase )
data_featuremap.append(__UpperCamelCase )
        # expand the data slices to one dimension
UpperCAmelCase__ : Any = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__UpperCamelCase ) )
UpperCAmelCase__ : List[str] = np.asarray(__UpperCamelCase )
return focus_list, data_featuremap
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="average_pool" )-> Tuple:
# pooling process
UpperCAmelCase__ : List[Any] = len(featuremaps[0] )
UpperCAmelCase__ : Dict = int(size_map / size_pooling )
UpperCAmelCase__ : Dict = []
for i_map in range(len(__UpperCamelCase ) ):
UpperCAmelCase__ : Optional[Any] = featuremaps[i_map]
UpperCAmelCase__ : List[str] = []
for i_focus in range(0 , __UpperCamelCase , __UpperCamelCase ):
for j_focus in range(0 , __UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : List[str] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__UpperCamelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__UpperCamelCase ) )
UpperCAmelCase__ : Dict = np.asmatrix(__UpperCamelCase ).reshape(__UpperCamelCase , __UpperCamelCase )
featuremap_pooled.append(__UpperCamelCase )
return featuremap_pooled
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
        # expand three-dimensional data into a one-dimensional list
UpperCAmelCase__ : List[str] = []
for i in range(len(__UpperCamelCase ) ):
UpperCAmelCase__ : Optional[Any] = np.shape(data[i] )
UpperCAmelCase__ : Tuple = data[i].reshape(1 , shapes[0] * shapes[1] )
UpperCAmelCase__ : Optional[Any] = data_listed.getA().tolist()[0]
data_expanded.extend(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = np.asarray(__UpperCamelCase )
return data_expanded
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
        # expand a matrix into a one-dimensional list
UpperCAmelCase__ : Tuple = np.asarray(__UpperCamelCase )
UpperCAmelCase__ : Any = np.shape(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[Any]:
UpperCAmelCase__ : List[str] = []
UpperCAmelCase__ : Tuple = 0
for i_map in range(__UpperCamelCase ):
UpperCAmelCase__ : str = np.ones((size_map, size_map) )
for i in range(0 , __UpperCamelCase , __UpperCamelCase ):
for j in range(0 , __UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : List[Any] = pd_pool[
i_pool
]
UpperCAmelCase__ : str = i_pool + 1
UpperCAmelCase__ : Optional[Any] = np.multiply(
__UpperCamelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(__UpperCamelCase )
return pd_all
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=bool )-> Optional[int]:
        # model training
print("----------------------Start Training-------------------------" )
print((" - - Shape: Train_Data ", np.shape(__UpperCamelCase )) )
print((" - - Shape: Teach_Data ", np.shape(__UpperCamelCase )) )
UpperCAmelCase__ : Union[str, Any] = 0
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : Any = 1_00_00
while rp < n_repeat and mse >= error_accuracy:
UpperCAmelCase__ : str = 0
print(F"-------------Learning Time {rp}--------------" )
for p in range(len(__UpperCamelCase ) ):
# print('------------Learning Image: %d--------------'%p)
UpperCAmelCase__ : Union[str, Any] = np.asmatrix(datas_train[p] )
UpperCAmelCase__ : int = np.asarray(datas_teach[p] )
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.convolute(
__UpperCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
UpperCAmelCase__ : Dict = self.pooling(__UpperCamelCase , self.size_poolinga )
UpperCAmelCase__ : List[str] = np.shape(__UpperCamelCase )
UpperCAmelCase__ : str = self._expand(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = data_bp_input
UpperCAmelCase__ : str = np.dot(__UpperCamelCase , self.vji.T ) - self.thre_bpa
UpperCAmelCase__ : Dict = self.sig(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = np.dot(__UpperCamelCase , self.wkj.T ) - self.thre_bpa
UpperCAmelCase__ : Optional[int] = self.sig(__UpperCamelCase )
            # --------------Model Learning ------------------------
# calculate error and gradient---------------
UpperCAmelCase__ : List[str] = np.multiply(
(data_teach - bp_outa) , np.multiply(__UpperCamelCase , (1 - bp_outa) ) )
UpperCAmelCase__ : Tuple = np.multiply(
np.dot(__UpperCamelCase , self.wkj ) , np.multiply(__UpperCamelCase , (1 - bp_outa) ) )
UpperCAmelCase__ : int = np.dot(__UpperCamelCase , self.vji )
UpperCAmelCase__ : str = pd_i_all / (self.size_poolinga * self.size_poolinga)
UpperCAmelCase__ : List[Any] = pd_conva_pooled.T.getA().tolist()
UpperCAmelCase__ : str = self._calculate_gradient_from_pool(
__UpperCamelCase , __UpperCamelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
UpperCAmelCase__ : Any = self._expand_mat(pd_conva_all[k_conv] )
UpperCAmelCase__ : Optional[int] = self.rate_weight * np.dot(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Tuple = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
UpperCAmelCase__ : int = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
UpperCAmelCase__ : str = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
UpperCAmelCase__ : Tuple = self.vji + pd_j_all.T * bp_outa * self.rate_weight
UpperCAmelCase__ : Optional[int] = self.thre_bpa - pd_k_all * self.rate_thre
UpperCAmelCase__ : Any = self.thre_bpa - pd_j_all * self.rate_thre
            # calculate the summed error for each single image
UpperCAmelCase__ : Optional[Any] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
UpperCAmelCase__ : int = rp + 1
UpperCAmelCase__ : Union[str, Any] = error_count / patterns
all_mse.append(__UpperCamelCase )
def draw_error():
UpperCAmelCase__ : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(__UpperCamelCase , "+-" )
plt.plot(__UpperCamelCase , "r--" )
plt.xlabel("Learning Times" )
plt.ylabel("All_mse" )
plt.grid(__UpperCamelCase , alpha=0.5 )
plt.show()
print("------------------Training Complished---------------------" )
print((" - - Training epoch: ", rp, F" - - Mse: {mse:.6f}") )
if draw_e:
draw_error()
return mse
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Dict:
# model predict
UpperCAmelCase__ : List[str] = []
print("-------------------Start Testing-------------------------" )
print((" - - Shape: Test_Data ", np.shape(__UpperCamelCase )) )
for p in range(len(__UpperCamelCase ) ):
UpperCAmelCase__ : Tuple = np.asmatrix(datas_test[p] )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.convolute(
__UpperCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
UpperCAmelCase__ : Any = self.pooling(__UpperCamelCase , self.size_poolinga )
UpperCAmelCase__ : Tuple = self._expand(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = data_bp_input
UpperCAmelCase__ : Optional[int] = bp_outa * self.vji.T - self.thre_bpa
UpperCAmelCase__ : Any = self.sig(__UpperCamelCase )
UpperCAmelCase__ : List[str] = bp_outa * self.wkj.T - self.thre_bpa
UpperCAmelCase__ : Any = self.sig(__UpperCamelCase )
produce_out.extend(bp_outa.getA().tolist() )
UpperCAmelCase__ : Tuple = [list(map(self.do_round , __UpperCamelCase ) ) for each in produce_out]
return np.asarray(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> int:
        # return the image data after the convolution process so we can inspect it
UpperCAmelCase__ : Any = np.asmatrix(__UpperCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ : str = self.convolute(
__UpperCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
UpperCAmelCase__ : Dict = self.pooling(__UpperCamelCase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
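# Illustrative usage sketch (not part of the original module; argument meanings are
# inferred from the constructor and the classmethod loader above): the first argument
# appears to supply [kernel_size, n_kernels, conv_step], the second the pooling window,
# the next three the widths of the fully connected layers, and the last two the learning
# rates for weights and thresholds. A hypothetical call for 12x12 inputs could be:
#   cnn = CNN([3, 2, 1], 2, 50, 10, 4, 0.2, 0.2)
# where 50 = 2 kernels * (5 * 5) pooled features, since (12 - 3) / 1 + 1 = 10 and 10 / 2 = 5.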
| 660 |
"""simple docstring"""
from timeit import timeit
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCAmelCase__ : Tuple = 0
while number:
number &= number - 1
result += 1
return result
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCAmelCase__ : Union[str, Any] = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def a__ ( ):
'''simple docstring'''
def do_benchmark(lowerCAmelCase : int ) -> None:
UpperCAmelCase__ : Dict = "import __main__ as z"
print(F"Benchmark when {number = }:" )
print(F"{get_set_bits_count_using_modulo_operator(lowerCAmelCase ) = }" )
UpperCAmelCase__ : Tuple = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=lowerCAmelCase )
print(F"timeit() runs in {timing} seconds" )
print(F"{get_set_bits_count_using_brian_kernighans_algorithm(lowerCAmelCase ) = }" )
UpperCAmelCase__ : Any = timeit(
"z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=lowerCAmelCase , )
print(F"timeit() runs in {timing} seconds" )
for number in (25, 37, 58, 0):
do_benchmark(lowerCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
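# Worked example (illustrative, not part of the original benchmark): Brian Kernighan's
# trick clears the lowest set bit on every iteration, so the loop runs once per set bit
# rather than once per bit position.
#   25 = 0b11001
#   25 & 24 = 0b11000  (first set bit cleared)
#   24 & 23 = 0b10000  (second set bit cleared)
#   16 & 15 = 0b00000  (third set bit cleared) -> 3 set bits in total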
| 660 | 1 |
"""simple docstring"""
from maths.prime_check import is_prime
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Union[str, Any] = F"Input value of [number={number}] must be an integer"
raise TypeError(lowerCAmelCase )
if is_prime(lowerCAmelCase ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
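# Illustrative examples (not part of the original module): the function above returns
# number + 2 when both number and number + 2 are prime, and -1 otherwise, so
# 3 -> 5, 5 -> 7 and 11 -> 13, while 4 -> -1 and 7 -> -1 (9 is not prime).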
| 660 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _lowercase ( unittest.TestCase , lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Optional[Any] = load_tool("text-classification" )
self.tool.setup()
UpperCAmelCase__ : List[str] = load_tool("text-classification" , remote=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Dict = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : List[Any] = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Any = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : str = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
| 660 | 1 |
"""simple docstring"""
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if a < 0:
raise ValueError("Input value must be a positive integer" )
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
raise TypeError("Input value must be a 'int' type" )
return bin(lowerCAmelCase ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod()
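# Example (illustrative): bin(25) == '0b11001', so counting the '1' characters returns 3.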
| 660 |
"""simple docstring"""
def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ):
'''simple docstring'''
_validate_point(lowerCAmelCase )
_validate_point(lowerCAmelCase )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(lowerCAmelCase , lowerCAmelCase ) ) )
def a__ ( lowerCAmelCase : list[float] ):
'''simple docstring'''
if point:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
for item in point:
if not isinstance(lowerCAmelCase , (int, float) ):
UpperCAmelCase__ : Tuple = (
"Expected a list of numbers as input, found "
F"{type(lowerCAmelCase ).__name__}"
)
raise TypeError(lowerCAmelCase )
else:
UpperCAmelCase__ : Dict = F"Expected a list of numbers as input, found {type(lowerCAmelCase ).__name__}"
raise TypeError(lowerCAmelCase )
else:
raise ValueError("Missing an input" )
def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ):
'''simple docstring'''
_validate_point(lowerCAmelCase )
_validate_point(lowerCAmelCase )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(lowerCAmelCase , lowerCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
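# Worked example (illustrative, not part of the original module): for the points
# (1, 2, 3) and (4, 0, 3) the Manhattan (taxicab) distance is
#   |1 - 4| + |2 - 0| + |3 - 3| = 3 + 2 + 0 = 5.0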
| 660 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : str = logging.get_logger(__name__)
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'timm_backbone'
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=None , **__UpperCamelCase , )-> Union[str, Any]:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : int = backbone
UpperCAmelCase__ : Tuple = num_channels
UpperCAmelCase__ : List[Any] = features_only
UpperCAmelCase__ : str = use_pretrained_backbone
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : List[str] = out_indices if out_indices is not None else (-1,)
| 660 |
"""simple docstring"""
import math
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a__ ( lowerCAmelCase : int = 1_0001 ):
'''simple docstring'''
try:
UpperCAmelCase__ : List[str] = int(lowerCAmelCase )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
UpperCAmelCase__ : list[int] = []
UpperCAmelCase__ : str = 2
while len(lowerCAmelCase ) < nth:
if is_prime(lowerCAmelCase ):
primes.append(lowerCAmelCase )
num += 1
else:
num += 1
return primes[len(lowerCAmelCase ) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 | 1 |
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
A__ : Union[str, Any] = re.compile(R"""^(?P<major>\d+)""" R"""\.(?P<minor>\d+)""" R"""\.(?P<patch>\d+)$""")
@total_ordering
@dataclass
class _lowercase :
'''simple docstring'''
_A = 42
_A = None
_A = None
_A = None
_A = None
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = _str_to_version_tuple(self.version_str )
def __repr__( self )-> str:
return F"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"
@property
def lowerCAmelCase__ ( self )-> Dict:
return self.major, self.minor, self.patch
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
return Version(__UpperCamelCase )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
return other
raise TypeError(F"{other} (type {type(__UpperCamelCase )}) cannot be compared to version." )
def __eq__( self , __UpperCamelCase )-> Optional[int]:
try:
UpperCAmelCase__ : Any = self._validate_operand(__UpperCamelCase )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self , __UpperCamelCase )-> List[str]:
UpperCAmelCase__ : Any = self._validate_operand(__UpperCamelCase )
return self.tuple < other.tuple
def __hash__( self )-> Optional[Any]:
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase )-> Dict:
UpperCAmelCase__ : str = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def lowerCAmelCase__ ( self )-> str:
return self.version_str
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = _VERSION_REG.match(lowerCAmelCase )
if not res:
raise ValueError(F"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits." )
return tuple(int(lowerCAmelCase ) for v in [res.group("major" ), res.group("minor" ), res.group("patch" )] )
def a__ ( lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
return ".".join(str(lowerCAmelCase ) for v in version_tuple )
| 660 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=30 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=10 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=0.6 , __UpperCamelCase=None , )-> List[Any]:
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Optional[Any] = batch_size
UpperCAmelCase__ : Any = image_size
UpperCAmelCase__ : Dict = patch_size
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Union[str, Any] = is_training
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : int = mask_ratio
UpperCAmelCase__ : Tuple = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCAmelCase__ : int = (image_size // patch_size) ** 2
UpperCAmelCase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
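        # (illustrative: with the defaults above, (30 // 2) ** 2 = 225 patches and
        # ceil(0.4 * (225 + 1)) = 91 visible tokens per sequence)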
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self )-> int:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = ViTMAEModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : List[Any] = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : Optional[int] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCAmelCase__ : List[str] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase__ : Dict = 1
UpperCAmelCase__ : str = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = config_and_inputs
UpperCAmelCase__ : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_A = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
_A = False
_A = False
_A = False
_A = False
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Any = ViTMAEModelTester(self )
UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def lowerCAmelCase__ ( self )-> int:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def lowerCAmelCase__ ( self )-> Dict:
pass
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class(__UpperCamelCase )
UpperCAmelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Dict = [*signature.parameters.keys()]
UpperCAmelCase__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Dict:
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCAmelCase__ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase__ : str = torch.from_numpy(__UpperCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase__ : Optional[Any] = pt_noise
super().check_pt_tf_models(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[Any] = outputs[0].cpu().numpy()
UpperCAmelCase__ : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = model_class.from_pretrained(__UpperCamelCase )
model.to(__UpperCamelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
# Make sure we don't have nans
UpperCAmelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__UpperCamelCase , 1E-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> List[str]:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Any:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Optional[Any]:
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def lowerCAmelCase__ ( self )-> List[Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
pass
@slow
def lowerCAmelCase__ ( self )-> Union[str, Any]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = ViTMAEModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self )-> List[Any]:
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self )-> Optional[int]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCAmelCase__ : Any = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__UpperCamelCase )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : Optional[Any] = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase__ : List[Any] = ViTMAEConfig()
UpperCAmelCase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase__ : Optional[int] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**__UpperCamelCase , noise=torch.from_numpy(__UpperCamelCase ).to(device=__UpperCamelCase ) )
# verify the logits
UpperCAmelCase__ : str = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__UpperCamelCase ) , atol=1E-4 ) )
| 660 | 1 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
A__ : Optional[Any] = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : bool , lowerCAmelCase : bool ):
'''simple docstring'''
def run_func(lowerCAmelCase : Dict ):
@wraps(lowerCAmelCase )
def run_in_eager_mode(*lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Dict ):
return func(*lowerCAmelCase , **lowerCAmelCase )
@wraps(lowerCAmelCase )
@tf.function(experimental_compile=lowerCAmelCase )
def run_in_graph_mode(*lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[Any] ):
return func(*lowerCAmelCase , **lowerCAmelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def a__ ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = random.Random()
UpperCAmelCase__ : List[str] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(lowerCAmelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = 42
_A = "TensorFlow"
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return tf.__version__
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
# initialize GPU on separate process
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Union[str, Any] = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
UpperCAmelCase__ : List[Any] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : List[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : List[str] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Any = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Optional[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : Union[str, Any] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Optional[int] = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : str = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : Any = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : List[Any] = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Dict = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : Any = TF_MODEL_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : int = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Optional[Any] = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , training=__UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__UpperCamelCase , training=__UpperCamelCase )
UpperCAmelCase__ : Dict = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : List[Any] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Any = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : Any = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : int = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : int = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : List[str] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : Union[str, Any] = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Dict = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Union[str, Any] = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Any = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
UpperCAmelCase__ : str = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCAmelCase__ ( self , __UpperCamelCase )-> float:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run the model an additional 5 times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(__UpperCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCAmelCase__ : Optional[Any] = timeit.repeat(
__UpperCamelCase , repeat=self.args.repeat , number=10 , )
return min(__UpperCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> [Memory, MemorySummary]:
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
UpperCAmelCase__ : List[str] = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
UpperCAmelCase__ : Optional[int] = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
UpperCAmelCase__ : Any = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
UpperCAmelCase__ : Optional[int] = nvml.nvmlDeviceGetMemoryInfo(__UpperCamelCase )
UpperCAmelCase__ : str = meminfo.used
UpperCAmelCase__ : int = Memory(__UpperCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
UpperCAmelCase__ : Any = None
else:
UpperCAmelCase__ : List[Any] = measure_peak_memory_cpu(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = Memory(__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCAmelCase__ : Optional[Any] = stop_memory_tracing(__UpperCamelCase )
if memory is None:
UpperCAmelCase__ : Tuple = summary.total
else:
UpperCAmelCase__ : int = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
return "N/A", None
| 660 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _lowercase :
'''simple docstring'''
_A = 42
# setable values
_A = 42
_A = 42
_A = None
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
return cls(common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase )
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
_A = [e.name for e in FlaxKarrasDiffusionSchedulers]
_A = 42
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return True
@register_to_config
def __init__( self , __UpperCamelCase = 10_00 , __UpperCamelCase = 0.0001 , __UpperCamelCase = 0.02 , __UpperCamelCase = "linear" , __UpperCamelCase = None , __UpperCamelCase = "fixed_small" , __UpperCamelCase = True , __UpperCamelCase = "epsilon" , __UpperCamelCase = jnp.floataa , )-> List[str]:
UpperCAmelCase__ : int = dtype
def lowerCAmelCase__ ( self , __UpperCamelCase = None )-> DDPMSchedulerState:
if common is None:
UpperCAmelCase__ : int = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype )
UpperCAmelCase__ : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None )-> jnp.ndarray:
return sample
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = () )-> DDPMSchedulerState:
UpperCAmelCase__ : Dict = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
UpperCAmelCase__ : Optional[int] = (jnp.arange(0 , __UpperCamelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase__ : Any = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
UpperCAmelCase__ : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCAmelCase__ : Dict = jnp.clip(__UpperCamelCase , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCAmelCase__ : Tuple = jnp.log(jnp.clip(__UpperCamelCase , a_min=1E-20 ) )
elif variance_type == "fixed_large":
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCAmelCase__ : List[str] = variance
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
UpperCAmelCase__ : Optional[int] = (predicted_variance + 1) / 2
UpperCAmelCase__ : Any = frac * max_log + (1 - frac) * min_log
return variance
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True , )-> Union[FlaxDDPMSchedulerOutput, Tuple]:
UpperCAmelCase__ : List[str] = timestep
if key is None:
UpperCAmelCase__ : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = jnp.split(__UpperCamelCase , sample.shape[1] , axis=1 )
else:
UpperCAmelCase__ : Optional[Any] = None
# 1. compute alphas, betas
UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCAmelCase__ : Union[str, Any] = 1 - alpha_prod_t
UpperCAmelCase__ : Tuple = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ : Any = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ : List[Any] = jnp.clip(__UpperCamelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase__ : List[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
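        # (illustrative expansion of formula (7), matching the two coefficients above:
        #  mu_t = (sqrt(alpha_bar_{t-1}) * beta_t / (1 - alpha_bar_t)) * x_0
        #       + (sqrt(alpha_t) * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)) * x_t)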
# 6. Add noise
def random_variance():
UpperCAmelCase__ : Any = jax.random.split(__UpperCamelCase , num=1 )
UpperCAmelCase__ : int = jax.random.normal(__UpperCamelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__UpperCamelCase , __UpperCamelCase , predicted_variance=__UpperCamelCase ) ** 0.5) * noise
UpperCAmelCase__ : Dict = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCAmelCase__ : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__UpperCamelCase , state=__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return add_noise_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return get_velocity_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __len__( self )-> Tuple:
return self.config.num_train_timesteps
| 660 | 1 |
"""simple docstring"""
from string import ascii_uppercase
A__ : Optional[Any] = {char: i for i, char in enumerate(ascii_uppercase)}
A__ : Optional[Any] = dict(enumerate(ascii_uppercase))
def a__ ( lowerCAmelCase : str , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase__ : Any = len(lowerCAmelCase )
UpperCAmelCase__ : Any = 0
while True:
if x == i:
UpperCAmelCase__ : Union[str, Any] = 0
if len(lowerCAmelCase ) == len(lowerCAmelCase ):
break
key += key[i]
i += 1
return key
def a__ ( lowerCAmelCase : str , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = ""
UpperCAmelCase__ : Tuple = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
UpperCAmelCase__ : List[str] = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def a__ ( lowerCAmelCase : str , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = ""
UpperCAmelCase__ : Tuple = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
UpperCAmelCase__ : List[str] = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = "THE GERMAN ATTACK"
UpperCAmelCase__ : Optional[int] = "SECRET"
UpperCAmelCase__ : Union[str, Any] = generate_key(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = cipher_text(lowerCAmelCase , lowerCAmelCase )
print(F"Encrypted Text = {s}" )
print(F"Original Text = {original_text(lowerCAmelCase , lowerCAmelCase )}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
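# Worked example (illustrative, not part of the original module): with message letter 'T'
# (index 19) and key letter 'S' (index 18), cipher_text maps it to (19 - 18) % 26 = 1 -> 'B',
# and original_text reverses it with (1 + 18 + 26) % 26 = 19 -> 'T'; spaces pass through unchanged.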
| 660 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ''
_A = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> str:
super().__init__(self , **__UpperCamelCase )
UpperCAmelCase__ : int = repo_info
UpperCAmelCase__ : Optional[int] = token
UpperCAmelCase__ : Optional[Any] = None
def lowerCAmelCase__ ( self )-> Optional[Any]:
if self.dir_cache is None:
UpperCAmelCase__ : str = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCAmelCase__ : str = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"name": str(__UpperCamelCase ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , )-> List[Any]:
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
UpperCAmelCase__ : Union[str, Any] = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def lowerCAmelCase__ ( self , __UpperCamelCase , **__UpperCamelCase )-> List[str]:
self._get_dirs()
UpperCAmelCase__ : Union[str, Any] = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase )-> str:
self._get_dirs()
UpperCAmelCase__ : str = PurePosixPath(path.strip("/" ) )
UpperCAmelCase__ : Optional[Any] = {}
for p, f in self.dir_cache.items():
UpperCAmelCase__ : Optional[int] = PurePosixPath(p.strip("/" ) )
UpperCAmelCase__ : Dict = p.parent
if root == path:
UpperCAmelCase__ : Tuple = f
UpperCAmelCase__ : List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
| 660 | 1 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
A__ : int = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
A__ : Union[str, Any] = logging.get_logger(__name__)
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'maskformer'
_A = {'hidden_size': 'mask_feature_size'}
_A = ['resnet', 'swin']
_A = ['detr']
def __init__( self , __UpperCamelCase = 2_56 , __UpperCamelCase = 2_56 , __UpperCamelCase = 0.1 , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = 0.02 , __UpperCamelCase = 1.0 , __UpperCamelCase = 1.0 , __UpperCamelCase = 1.0 , __UpperCamelCase = 20.0 , __UpperCamelCase = None , **__UpperCamelCase , )-> Union[str, Any]:
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
UpperCAmelCase__ : Dict = SwinConfig(
image_size=3_84 , in_channels=3 , patch_size=4 , embed_dim=1_28 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["stage1", "stage2", "stage3", "stage4"] , )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : str = backbone_config.pop("model_type" )
UpperCAmelCase__ : Dict = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase__ : Tuple = config_class.from_dict(__UpperCamelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
F"Supported model types: {','.join(self.backbones_supported )}" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
UpperCAmelCase__ : Tuple = DetrConfig()
else:
# verify that the decoder is supported
UpperCAmelCase__ : List[Any] = (
decoder_config.pop("model_type" ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F"Transformer Decoder {decoder_type} not supported, please use one of"
F" {','.join(self.decoders_supported )}" )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : Optional[int] = CONFIG_MAPPING[decoder_type]
UpperCAmelCase__ : Optional[int] = config_class.from_dict(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = backbone_config
UpperCAmelCase__ : Optional[Any] = decoder_config
# main feature dimension for the model
UpperCAmelCase__ : Dict = fpn_feature_size
UpperCAmelCase__ : List[str] = mask_feature_size
# initializer
UpperCAmelCase__ : List[Any] = init_std
UpperCAmelCase__ : List[Any] = init_xavier_std
# Hungarian matcher && loss
UpperCAmelCase__ : Optional[int] = cross_entropy_weight
UpperCAmelCase__ : Union[str, Any] = dice_weight
UpperCAmelCase__ : str = mask_weight
UpperCAmelCase__ : Optional[int] = use_auxiliary_loss
UpperCAmelCase__ : List[str] = no_object_weight
UpperCAmelCase__ : List[Any] = output_auxiliary_logits
UpperCAmelCase__ : Dict = self.decoder_config.encoder_attention_heads
UpperCAmelCase__ : int = self.decoder_config.num_hidden_layers
super().__init__(**__UpperCamelCase )
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )-> Optional[Any]:
return cls(
backbone_config=__UpperCamelCase , decoder_config=__UpperCamelCase , **__UpperCamelCase , )
def lowerCAmelCase__ ( self )-> Dict[str, any]:
UpperCAmelCase__ : Dict = copy.deepcopy(self.__dict__ )
UpperCAmelCase__ : Any = self.backbone_config.to_dict()
UpperCAmelCase__ : int = self.decoder_config.to_dict()
UpperCAmelCase__ : Union[str, Any] = self.__class__.model_type
return output
| 660 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
A__ : Dict = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
    if isinstance(lowerCAmelCase , (list, tuple) ) and isinstance(lowerCAmelCase[0] , (list, tuple) ) and is_valid_image(lowerCAmelCase[0][0] ):
        return lowerCAmelCase
    elif isinstance(lowerCAmelCase , (list, tuple) ) and is_valid_image(lowerCAmelCase[0] ):
        return [lowerCAmelCase]
    elif is_valid_image(lowerCAmelCase ):
        return [[lowerCAmelCase]]
    raise ValueError(F"Could not make batched video from {lowerCAmelCase}" )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ['pixel_values']
def __init__( self , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = 1 / 2_55 , __UpperCamelCase = True , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> None:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
UpperCAmelCase__ : List[Any] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase__ : int = get_size_dict(__UpperCamelCase , param_name="crop_size" )
UpperCAmelCase__ : Dict = do_resize
UpperCAmelCase__ : Optional[int] = size
UpperCAmelCase__ : List[Any] = do_center_crop
UpperCAmelCase__ : str = crop_size
UpperCAmelCase__ : Optional[int] = resample
UpperCAmelCase__ : int = do_rescale
UpperCAmelCase__ : Union[str, Any] = rescale_factor
UpperCAmelCase__ : Union[str, Any] = offset
UpperCAmelCase__ : Dict = do_normalize
UpperCAmelCase__ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase__ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" in size:
UpperCAmelCase__ : Union[str, Any] = get_resize_output_image_size(__UpperCamelCase , size["shortest_edge"] , default_to_square=__UpperCamelCase )
elif "height" in size and "width" in size:
UpperCAmelCase__ : Any = (size["height"], size["width"])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[Any] = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(__UpperCamelCase , size=(size["height"], size["width"]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , )-> Tuple:
UpperCAmelCase__ : str = image.astype(np.floataa )
if offset:
UpperCAmelCase__ : Tuple = image - (scale / 2)
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , )-> np.ndarray:
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase__ : Optional[Any] = to_numpy_array(__UpperCamelCase )
if do_resize:
UpperCAmelCase__ : Union[str, Any] = self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase )
if do_center_crop:
UpperCAmelCase__ : int = self.center_crop(__UpperCamelCase , size=__UpperCamelCase )
if do_rescale:
UpperCAmelCase__ : List[str] = self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase , offset=__UpperCamelCase )
if do_normalize:
UpperCAmelCase__ : List[Any] = self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase )
UpperCAmelCase__ : Dict = to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase )
return image
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , )-> PIL.Image.Image:
UpperCAmelCase__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : int = resample if resample is not None else self.resample
UpperCAmelCase__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : int = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : Optional[int] = offset if offset is not None else self.offset
UpperCAmelCase__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Dict = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : Optional[int] = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : List[str] = size if size is not None else self.size
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : Dict = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : Tuple = get_size_dict(__UpperCamelCase , param_name="crop_size" )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase__ : List[str] = make_batched(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = [
[
self._preprocess_image(
image=__UpperCamelCase , do_resize=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , do_center_crop=__UpperCamelCase , crop_size=__UpperCamelCase , do_rescale=__UpperCamelCase , rescale_factor=__UpperCamelCase , offset=__UpperCamelCase , do_normalize=__UpperCamelCase , image_mean=__UpperCamelCase , image_std=__UpperCamelCase , data_format=__UpperCamelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase__ : Dict = {"pixel_values": videos}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
| 660 | 1 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
A__ : str = logging.get_logger(__name__)
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Dict:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : Optional[int] = [label.strip() for label in labels.split("," ) if label.strip()]
return labels
def __call__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
if len(__UpperCamelCase ) == 0 or len(__UpperCamelCase ) == 0:
raise ValueError("You must include at least one label and at least one sequence." )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. "
"Make sure the passed template includes formatting syntax such as {{}} where the label should go."
).format(__UpperCamelCase ) )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : List[Any] = [sequences]
UpperCAmelCase__ : int = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(__UpperCamelCase )] for label in labels] )
return sequence_pairs, sequences
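# The handler above is what turns zero-shot classification into an NLI problem: every
# (sequence, candidate label) combination becomes a premise/hypothesis pair, with the
# label substituted into hypothesis_template.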
@add_end_docstrings(lowerCAmelCase_ )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self , __UpperCamelCase=ZeroShotClassificationArgumentHandler() , *__UpperCamelCase , **__UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : List[str] = args_parser
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
if self.entailment_id == -1:
logger.warning(
"Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
"-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." )
@property
def lowerCAmelCase__ ( self )-> Tuple:
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("entail" ):
return ind
return -1
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=TruncationStrategy.ONLY_FIRST , **__UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : Union[str, Any] = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
" `pad_token=eos_token`" )
UpperCAmelCase__ : Dict = self.tokenizer.eos_token
try:
UpperCAmelCase__ : Optional[Any] = self.tokenizer(
__UpperCamelCase , add_special_tokens=__UpperCamelCase , return_tensors=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , )
except Exception as e:
if "too short" in str(__UpperCamelCase ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
UpperCAmelCase__ : Tuple = self.tokenizer(
__UpperCamelCase , add_special_tokens=__UpperCamelCase , return_tensors=__UpperCamelCase , padding=__UpperCamelCase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def lowerCAmelCase__ ( self , **__UpperCamelCase )-> List[Any]:
if kwargs.get("multi_class" , __UpperCamelCase ) is not None:
UpperCAmelCase__ : List[str] = kwargs["multi_class"]
logger.warning(
"The `multi_class` argument has been deprecated and renamed to `multi_label`. "
"`multi_class` will be removed in a future version of Transformers." )
UpperCAmelCase__ : Tuple = {}
if "candidate_labels" in kwargs:
UpperCAmelCase__ : Dict = self._args_parser._parse_labels(kwargs["candidate_labels"] )
if "hypothesis_template" in kwargs:
UpperCAmelCase__ : Optional[Any] = kwargs["hypothesis_template"]
UpperCAmelCase__ : List[str] = {}
if "multi_label" in kwargs:
UpperCAmelCase__ : str = kwargs["multi_label"]
return preprocess_params, {}, postprocess_params
def __call__( self , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase , )-> List[Any]:
if len(__UpperCamelCase ) == 0:
pass
elif len(__UpperCamelCase ) == 1 and "candidate_labels" not in kwargs:
UpperCAmelCase__ : List[str] = args[0]
else:
raise ValueError(F"Unable to understand extra arguments {args}" )
return super().__call__(__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase="This example is {}." )-> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : int = self._args_parser(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
for i, (candidate_label, sequence_pair) in enumerate(zip(__UpperCamelCase , __UpperCamelCase ) ):
UpperCAmelCase__ : Union[str, Any] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(__UpperCamelCase ) - 1,
**model_input,
}
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[Any]:
UpperCAmelCase__ : Dict = inputs["candidate_label"]
UpperCAmelCase__ : Optional[int] = inputs["sequence"]
UpperCAmelCase__ : List[str] = {k: inputs[k] for k in self.tokenizer.model_input_names}
UpperCAmelCase__ : int = self.model(**__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = {
"candidate_label": candidate_label,
"sequence": sequence,
"is_last": inputs["is_last"],
**outputs,
}
return model_outputs
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=False )-> List[str]:
UpperCAmelCase__ : List[str] = [outputs["candidate_label"] for outputs in model_outputs]
UpperCAmelCase__ : int = [outputs["sequence"] for outputs in model_outputs]
UpperCAmelCase__ : Optional[Any] = np.concatenate([output["logits"].numpy() for output in model_outputs] )
UpperCAmelCase__ : Dict = logits.shape[0]
UpperCAmelCase__ : Any = len(__UpperCamelCase )
UpperCAmelCase__ : Any = N // n
UpperCAmelCase__ : Optional[int] = logits.reshape((num_sequences, n, -1) )
if multi_label or len(__UpperCamelCase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
UpperCAmelCase__ : List[Any] = self.entailment_id
UpperCAmelCase__ : List[str] = -1 if entailment_id == 0 else 0
UpperCAmelCase__ : Optional[Any] = reshaped_outputs[..., [contradiction_id, entailment_id]]
UpperCAmelCase__ : Optional[int] = np.exp(__UpperCamelCase ) / np.exp(__UpperCamelCase ).sum(-1 , keepdims=__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
UpperCAmelCase__ : Dict = reshaped_outputs[..., self.entailment_id]
UpperCAmelCase__ : List[Any] = np.exp(__UpperCamelCase ) / np.exp(__UpperCamelCase ).sum(-1 , keepdims=__UpperCamelCase )
UpperCAmelCase__ : List[Any] = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
| 660 |
"""simple docstring"""
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
    if lowerCAmelCase < 0:
        raise ValueError("Input value must be a positive integer" )
    elif not isinstance(lowerCAmelCase , int ):
        raise TypeError("Input value must be a 'int' type" )
return bin(lowerCAmelCase ).count("1" )
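# Example: a__(25) returns 3, since bin(25) == "0b11001" contains three "1" characters.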
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : List[Any] ):
'''simple docstring'''
# Construct model
if gpta_config_file == "":
UpperCAmelCase__ : Optional[int] = GPTaConfig()
else:
UpperCAmelCase__ : Dict = GPTaConfig.from_json_file(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = GPTaModel(lowerCAmelCase )
# Load weights from numpy
load_tf_weights_in_gpta(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save pytorch-model
UpperCAmelCase__ : Optional[int] = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
UpperCAmelCase__ : Any = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
torch.save(model.state_dict() , lowerCAmelCase )
print(F"Save configuration file to {pytorch_config_dump_path}" )
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
A__ : Optional[Any] = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
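# Example invocation (script and path names are illustrative only, not taken from this file):
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/output \
#       --gpt2_config_file /path/to/config.json   # optional; an empty value falls back to the default config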
| 660 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
A__ : Optional[Any] = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : bool , lowerCAmelCase : bool ):
'''simple docstring'''
def run_func(lowerCAmelCase : Dict ):
@wraps(lowerCAmelCase )
def run_in_eager_mode(*lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Dict ):
return func(*lowerCAmelCase , **lowerCAmelCase )
@wraps(lowerCAmelCase )
@tf.function(experimental_compile=lowerCAmelCase )
def run_in_graph_mode(*lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[Any] ):
return func(*lowerCAmelCase , **lowerCAmelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
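# run_with_tf_optimizations (the factory above) returns a decorator: in eager mode the
# wrapped call runs as plain Python, otherwise it is traced with tf.function and, when
# requested, compiled with XLA via experimental_compile.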
def a__ ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = random.Random()
UpperCAmelCase__ : List[str] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(lowerCAmelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
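# random_input_ids (above) produces a (batch_size, sequence_length) tensor of random token
# ids in [0, vocab_size), used as dummy model input by the speed and memory benchmark runs.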
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = 42
_A = "TensorFlow"
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return tf.__version__
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
# initialize GPU on separate process
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Union[str, Any] = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
UpperCAmelCase__ : List[Any] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : List[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : List[str] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Any = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Optional[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : Union[str, Any] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Optional[int] = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : str = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : Any = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : List[Any] = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Dict = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : Any = TF_MODEL_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : int = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Optional[Any] = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , training=__UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__UpperCamelCase , training=__UpperCamelCase )
UpperCAmelCase__ : Dict = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : List[Any] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Any = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : Any = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : int = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : int = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : List[str] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : Union[str, Any] = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Dict = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Union[str, Any] = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Any = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
UpperCAmelCase__ : str = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCAmelCase__ ( self , __UpperCamelCase )-> float:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run additional 5 times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(__UpperCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCAmelCase__ : Optional[Any] = timeit.repeat(
__UpperCamelCase , repeat=self.args.repeat , number=10 , )
return min(__UpperCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> [Memory, MemorySummary]:
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
UpperCAmelCase__ : List[str] = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
UpperCAmelCase__ : Optional[int] = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
UpperCAmelCase__ : Any = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
UpperCAmelCase__ : Optional[int] = nvml.nvmlDeviceGetMemoryInfo(__UpperCamelCase )
UpperCAmelCase__ : str = meminfo.used
UpperCAmelCase__ : int = Memory(__UpperCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
UpperCAmelCase__ : Any = None
else:
UpperCAmelCase__ : List[Any] = measure_peak_memory_cpu(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = Memory(__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCAmelCase__ : Optional[Any] = stop_memory_tracing(__UpperCamelCase )
if memory is None:
UpperCAmelCase__ : Tuple = summary.total
else:
UpperCAmelCase__ : int = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
return "N/A", None
| 660 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ['image_processor', 'tokenizer']
_A = 'BlipImageProcessor'
_A = 'AutoTokenizer'
def __init__( self , __UpperCamelCase , __UpperCamelCase )-> List[str]:
UpperCAmelCase__ : List[Any] = False
super().__init__(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : str = self.image_processor
def __call__( self , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = 0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , )-> BatchEncoding:
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
UpperCAmelCase__ : Optional[Any] = self.tokenizer
UpperCAmelCase__ : str = self.tokenizer(
text=__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , stride=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_overflowing_tokens=__UpperCamelCase , return_special_tokens_mask=__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_length=__UpperCamelCase , verbose=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , )
return text_encoding
# add pixel_values
UpperCAmelCase__ : List[str] = self.image_processor(__UpperCamelCase , return_tensors=__UpperCamelCase )
if text is not None:
UpperCAmelCase__ : Any = self.tokenizer(
text=__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , stride=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_overflowing_tokens=__UpperCamelCase , return_special_tokens_mask=__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_length=__UpperCamelCase , verbose=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , )
else:
UpperCAmelCase__ : Optional[Any] = None
if text_encoding is not None:
encoding_image_processor.update(__UpperCamelCase )
return encoding_image_processor
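    # In short: with no images the call above returns the tokenizer output directly;
    # otherwise the image processor runs first and the tokenized text (if any) is merged
    # into the returned pixel_values encoding.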
def lowerCAmelCase__ ( self , *__UpperCamelCase , **__UpperCamelCase )-> str:
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , *__UpperCamelCase , **__UpperCamelCase )-> Optional[Any]:
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ : int = self.tokenizer.model_input_names
UpperCAmelCase__ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 660 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
A__ : List[str] = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
A__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 660 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : List[Any] = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
UpperCAmelCase__ : Optional[int] = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase__ : Optional[Any] = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase__ : Dict = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(__UpperCamelCase )["last_hidden_state"].detach()
self.assertEqual(output.shape , __UpperCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __UpperCamelCase , atol=1E-3 ) )
@slow
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : int = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
UpperCAmelCase__ : Optional[Any] = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase__ : List[str] = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase__ : List[Any] = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase__ : int = model(__UpperCamelCase )["last_hidden_state"].detach()
self.assertEqual(output.shape , __UpperCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __UpperCamelCase , atol=1E-3 ) )
| 660 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self , __UpperCamelCase = 7_68 , )-> Union[str, Any]:
super().__init__()
UpperCAmelCase__ : str = nn.Parameter(torch.zeros(1 , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[int] = nn.Parameter(torch.ones(1 , __UpperCamelCase ) )
def lowerCAmelCase__ ( self , __UpperCamelCase = None , __UpperCamelCase = None , )-> Any:
UpperCAmelCase__ : Dict = nn.Parameter(self.mean.to(__UpperCamelCase ).to(__UpperCamelCase ) )
UpperCAmelCase__ : Any = nn.Parameter(self.std.to(__UpperCamelCase ).to(__UpperCamelCase ) )
return self
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Dict = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[Any]:
UpperCAmelCase__ : Any = (embeds * self.std) + self.mean
return embeds
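# The two methods above standardize embeddings as (embeds - mean) / std and invert that
# transform, using the mean/std parameters created in __init__.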
| 660 | 1 |
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
A__ : int = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
A__ : Union[str, Any] = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
A__ : int = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def a__ ( lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] ):
'''simple docstring'''
return float((preds == labels).mean() )
def a__ ( lowerCAmelCase : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict="binary" ):
'''simple docstring'''
UpperCAmelCase__ : str = simple_accuracy(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : int = float(fa_score(y_true=lowerCAmelCase , y_pred=lowerCAmelCase , average=lowerCAmelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def a__ ( lowerCAmelCase : Tuple , lowerCAmelCase : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = {}
for id_pred, label in zip(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : List[str] = F"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
UpperCAmelCase__ : Optional[Any] = id_pred["prediction"]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
UpperCAmelCase__ : Optional[Any] = [(pred, label)]
UpperCAmelCase__ , UpperCAmelCase__ : Dict = [], []
for question, preds_labels in question_map.items():
UpperCAmelCase__ , UpperCAmelCase__ : int = zip(*lowerCAmelCase )
UpperCAmelCase__ : str = fa_score(y_true=lowerCAmelCase , y_pred=lowerCAmelCase , average="macro" )
fas.append(lowerCAmelCase )
UpperCAmelCase__ : int = int(sum(pred == label for pred, label in preds_labels ) == len(lowerCAmelCase ) )
ems.append(lowerCAmelCase )
UpperCAmelCase__ : List[Any] = float(sum(lowerCAmelCase ) / len(lowerCAmelCase ) )
UpperCAmelCase__ : List[str] = sum(lowerCAmelCase ) / len(lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = float(fa_score(y_true=lowerCAmelCase , y_pred=[id_pred["prediction"] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Tuple:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
def lowerCAmelCase__ ( self )-> Tuple:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> List[Any]:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(__UpperCamelCase , __UpperCamelCase )}
elif self.config_name == "cb":
return acc_and_fa(__UpperCamelCase , __UpperCamelCase , fa_avg="macro" )
elif self.config_name == "record":
UpperCAmelCase__ : Union[str, Any] = [
{
"qas": [
{"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
for ref in references
]
}
]
UpperCAmelCase__ : Any = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
return evaluate_record(__UpperCamelCase , __UpperCamelCase )[0]
elif self.config_name == "multirc":
return evaluate_multirc(__UpperCamelCase , __UpperCamelCase )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(__UpperCamelCase , __UpperCamelCase )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
| 660 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : List[Any] ):
'''simple docstring'''
# Construct model
if gpta_config_file == "":
UpperCAmelCase__ : Optional[int] = GPTaConfig()
else:
UpperCAmelCase__ : Dict = GPTaConfig.from_json_file(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = GPTaModel(lowerCAmelCase )
# Load weights from numpy
load_tf_weights_in_gpta(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save pytorch-model
UpperCAmelCase__ : Optional[int] = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
UpperCAmelCase__ : Any = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
torch.save(model.state_dict() , lowerCAmelCase )
print(F"Save configuration file to {pytorch_config_dump_path}" )
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
A__ : Optional[Any] = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 660 | 1 |
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
A__ : List[Any] = """bert-base-cased"""
A__ : Any = """google/pegasus-xsum"""
A__ : str = [""" Sam ate lunch today.""", """Sams lunch ingredients."""]
A__ : str = ["""A very interesting story about what I ate for lunch.""", """Avocado, celery, turkey, coffee"""]
A__ : str = """patrickvonplaten/t5-tiny-random"""
A__ : List[str] = """sshleifer/bart-tiny-random"""
A__ : Optional[int] = """sshleifer/tiny-mbart"""
A__ : str = """sshleifer/tiny-marian-en-de"""
def a__ ( lowerCAmelCase : Path , lowerCAmelCase : list ):
'''simple docstring'''
UpperCAmelCase__ : str = "\n".join(lowerCAmelCase )
Path(lowerCAmelCase ).open("w" ).writelines(lowerCAmelCase )
def a__ ( lowerCAmelCase : str ):
'''simple docstring'''
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(lowerCAmelCase , F"{split}.source" ) , lowerCAmelCase )
_dump_articles(os.path.join(lowerCAmelCase , F"{split}.target" ) , lowerCAmelCase )
return tmp_dir
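# Test fixture helper: writes the toy ARTICLES / SUMMARIES above into {split}.source and
# {split}.target files for the train, val and test splits and returns the directory.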
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Dict:
UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Tuple = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
UpperCAmelCase__ : Optional[Any] = max(len(tokenizer.encode(__UpperCamelCase ) ) for a in ARTICLES )
UpperCAmelCase__ : Tuple = max(len(tokenizer.encode(__UpperCamelCase ) ) for a in SUMMARIES )
UpperCAmelCase__ : Dict = 4
UpperCAmelCase__ : Tuple = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error.
UpperCAmelCase__ : Dict = SeqaSeqDataset(
__UpperCamelCase , data_dir=__UpperCamelCase , type_path="train" , max_source_length=__UpperCamelCase , max_target_length=__UpperCamelCase , src_lang=__UpperCamelCase , tgt_lang=__UpperCamelCase , )
UpperCAmelCase__ : str = DataLoader(__UpperCamelCase , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(__UpperCamelCase , __UpperCamelCase )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
UpperCAmelCase__ : List[Any] = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
UpperCAmelCase__ : Union[str, Any] = max(len(tokenizer.encode(__UpperCamelCase ) ) for a in ARTICLES )
UpperCAmelCase__ : Any = max(len(tokenizer.encode(__UpperCamelCase ) ) for a in SUMMARIES )
UpperCAmelCase__ : List[Any] = 4
UpperCAmelCase__ : Tuple = LegacySeqaSeqDataset(
__UpperCamelCase , data_dir=__UpperCamelCase , type_path="train" , max_source_length=20 , max_target_length=__UpperCamelCase , )
UpperCAmelCase__ : str = DataLoader(__UpperCamelCase , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
UpperCAmelCase__ : str = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
UpperCAmelCase__ : Dict = tmp_dir.joinpath("train.source" ).open().readlines()
UpperCAmelCase__ : Tuple = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(__UpperCamelCase , __UpperCamelCase , 1_28 , __UpperCamelCase )
UpperCAmelCase__ : Dict = {x.name for x in tmp_dir.iterdir()}
UpperCAmelCase__ : Any = {x.name for x in save_dir.iterdir()}
UpperCAmelCase__ : Dict = save_dir.joinpath("train.source" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(__UpperCamelCase ) < len(__UpperCamelCase )
assert len(__UpperCamelCase ) == 1
assert len(packed_examples[0] ) == sum(len(__UpperCamelCase ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" )
def lowerCAmelCase__ ( self )-> Optional[int]:
if not FAIRSEQ_AVAILABLE:
return
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self._get_dataset(max_len=64 )
UpperCAmelCase__ : List[str] = 64
UpperCAmelCase__ : Optional[int] = ds.make_dynamic_sampler(__UpperCamelCase , required_batch_size_multiple=__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = [len(__UpperCamelCase ) for x in batch_sampler]
assert len(set(__UpperCamelCase ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(__UpperCamelCase ) == len(__UpperCamelCase ) # no dropped or added examples
UpperCAmelCase__ : str = DataLoader(__UpperCamelCase , batch_sampler=__UpperCamelCase , collate_fn=ds.collate_fn , num_workers=2 )
UpperCAmelCase__ : Tuple = []
UpperCAmelCase__ : str = []
for batch in data_loader:
UpperCAmelCase__ : Optional[int] = batch["input_ids"].shape
UpperCAmelCase__ : List[str] = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
UpperCAmelCase__ : List[Any] = np.product(batch["input_ids"].shape )
num_src_per_batch.append(__UpperCamelCase )
if num_src_tokens > (max_tokens * 1.1):
failures.append(__UpperCamelCase )
assert num_src_per_batch[0] == max(__UpperCamelCase )
if failures:
raise AssertionError(F"too many tokens in {len(__UpperCamelCase )} batches" )
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self._get_dataset(max_len=5_12 )
UpperCAmelCase__ : Tuple = 2
UpperCAmelCase__ : Optional[Any] = ds.make_sortish_sampler(__UpperCamelCase , shuffle=__UpperCamelCase )
UpperCAmelCase__ : int = DataLoader(__UpperCamelCase , batch_size=__UpperCamelCase , collate_fn=ds.collate_fn , num_workers=2 )
UpperCAmelCase__ : int = DataLoader(__UpperCamelCase , batch_size=__UpperCamelCase , collate_fn=ds.collate_fn , num_workers=2 , sampler=__UpperCamelCase )
UpperCAmelCase__ : str = tokenizer.pad_token_id
def count_pad_tokens(__UpperCamelCase , __UpperCamelCase="input_ids" ):
return [batch[k].eq(__UpperCamelCase ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(__UpperCamelCase , k="labels" ) ) < sum(count_pad_tokens(__UpperCamelCase , k="labels" ) )
assert sum(count_pad_tokens(__UpperCamelCase ) ) < sum(count_pad_tokens(__UpperCamelCase ) )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase=10_00 , __UpperCamelCase=1_28 )-> Any:
if os.getenv("USE_REAL_DATA" , __UpperCamelCase ):
UpperCAmelCase__ : Optional[int] = "examples/seq2seq/wmt_en_ro"
UpperCAmelCase__ : Optional[int] = max_len * 2 * 64
if not Path(__UpperCamelCase ).joinpath("train.len" ).exists():
save_len_file(__UpperCamelCase , __UpperCamelCase )
else:
UpperCAmelCase__ : List[Any] = "examples/seq2seq/test_data/wmt_en_ro"
UpperCAmelCase__ : Any = max_len * 4
save_len_file(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(__UpperCamelCase )
UpperCAmelCase__ : List[str] = SeqaSeqDataset(
__UpperCamelCase , data_dir=__UpperCamelCase , type_path="train" , max_source_length=__UpperCamelCase , max_target_length=__UpperCamelCase , n_obs=__UpperCamelCase , )
return ds, max_tokens, tokenizer
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self._get_dataset()
UpperCAmelCase__ : Any = set(DistributedSortishSampler(__UpperCamelCase , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=__UpperCamelCase ) )
UpperCAmelCase__ : int = set(DistributedSortishSampler(__UpperCamelCase , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=__UpperCamelCase ) )
assert idsa.intersection(__UpperCamelCase ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> str:
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(__UpperCamelCase , use_fast=__UpperCamelCase )
if tok_name == MBART_TINY:
UpperCAmelCase__ : str = SeqaSeqDataset(
__UpperCamelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , )
UpperCAmelCase__ : Dict = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
UpperCAmelCase__ : Optional[int] = SeqaSeqDataset(
__UpperCamelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , )
UpperCAmelCase__ : Optional[Any] = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(__UpperCamelCase ) == 1 if tok_name == BART_TINY else len(__UpperCamelCase ) == 0
| 660 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
A__ : Optional[int] = ["""small""", """medium""", """large"""]
A__ : Optional[int] = """lm_head.decoder.weight"""
A__ : Dict = """lm_head.weight"""
def a__ ( lowerCAmelCase : str , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = torch.load(lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = d.pop(lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
torch.save(lowerCAmelCase , os.path.join(lowerCAmelCase , lowerCAmelCase ) )
if __name__ == "__main__":
A__ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
A__ : Tuple = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
A__ : Tuple = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
A__ : str = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 660 | 1 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
A__ : List[Any] = object()
# For specifying empty leaf dict `{}`
A__ : str = object()
def a__ ( lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] ):
'''simple docstring'''
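    # Return True if the tuple of anchored regexes matches a contiguous window of the key tuple.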
UpperCAmelCase__ : Optional[int] = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(lowerCAmelCase ) - len(lowerCAmelCase ) + 1 ):
UpperCAmelCase__ : Dict = [x.match(lowerCAmelCase ) for x, y in zip(lowerCAmelCase , ks[i:] )]
if matches and all(lowerCAmelCase ):
return True
return False
def a__ ( lowerCAmelCase : List[str] ):
'''simple docstring'''
def replace(lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] ):
for rule, replacement in rules:
if _match(lowerCAmelCase , lowerCAmelCase ):
return replacement
return val
return replace
def a__ ( ):
'''simple docstring'''
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp" , lowerCAmelCase )),
(("transformer", "wte", "embedding"), P("mp" , lowerCAmelCase )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(lowerCAmelCase , "mp" )),
(("attention", "out_proj", "kernel"), P("mp" , lowerCAmelCase )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(lowerCAmelCase , "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp" , lowerCAmelCase )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def a__ ( lowerCAmelCase : List[str] ):
'''simple docstring'''
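    # Map every flattened parameter key to a PartitionSpec via the first matching rule;
    # any key left with the unmatched sentinel trips the assertion below.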
UpperCAmelCase__ : Any = _get_partition_rules()
UpperCAmelCase__ : Any = _replacement_rules(lowerCAmelCase )
UpperCAmelCase__ : Tuple = {k: _unmatched for k in flatten_dict(lowerCAmelCase )}
UpperCAmelCase__ : List[Any] = {k: replace(lowerCAmelCase , lowerCAmelCase ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(lowerCAmelCase ) )
| 660 |
"""simple docstring"""
from math import isqrt
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
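    # Sieve of Eratosthenes: mark multiples of each prime up to sqrt(max_number) as composite.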
UpperCAmelCase__ : Tuple = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : List[Any] = False
return [i for i in range(2 , lowerCAmelCase ) if is_prime[i]]
def a__ ( lowerCAmelCase : int = 10**8 ):
'''simple docstring'''
UpperCAmelCase__ : Dict = calculate_prime_numbers(max_number // 2 )
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Tuple = len(lowerCAmelCase ) - 1
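    # Two-pointer scan: shrink `right` until prime_numbers[left] * prime_numbers[right] < max_number,
    # then every prime between `left` and `right` pairs with prime_numbers[left] to form a semiprime.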
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 660 | 1 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=30 , __UpperCamelCase=4_00 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=True , __UpperCamelCase=1 / 2_55 , __UpperCamelCase=True , )-> int:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
UpperCAmelCase__ : Any = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
UpperCAmelCase__ : List[Any] = parent
UpperCAmelCase__ : str = batch_size
UpperCAmelCase__ : Any = num_channels
UpperCAmelCase__ : str = min_resolution
UpperCAmelCase__ : Tuple = max_resolution
UpperCAmelCase__ : List[Any] = do_resize
UpperCAmelCase__ : Union[str, Any] = size
UpperCAmelCase__ : Optional[int] = do_normalize
UpperCAmelCase__ : Any = image_mean
UpperCAmelCase__ : Any = image_std
UpperCAmelCase__ : List[Any] = do_rescale
UpperCAmelCase__ : int = rescale_factor
UpperCAmelCase__ : Optional[Any] = do_pad
def lowerCAmelCase__ ( self )-> Optional[int]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=False )-> List[Any]:
if not batched:
UpperCAmelCase__ : List[Any] = image_inputs[0]
if isinstance(__UpperCamelCase , Image.Image ):
UpperCAmelCase__ , UpperCAmelCase__ : Dict = image.size
else:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase__ : Union[str, Any] = int(self.size["shortest_edge"] * h / w )
UpperCAmelCase__ : int = self.size["shortest_edge"]
elif w > h:
UpperCAmelCase__ : List[Any] = self.size["shortest_edge"]
UpperCAmelCase__ : Dict = int(self.size["shortest_edge"] * w / h )
else:
UpperCAmelCase__ : Union[str, Any] = self.size["shortest_edge"]
UpperCAmelCase__ : List[Any] = self.size["shortest_edge"]
else:
UpperCAmelCase__ : Any = []
for image in image_inputs:
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase__ : Dict = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[0] )[0]
UpperCAmelCase__ : Tuple = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _lowercase ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = ConditionalDetrImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : str = ConditionalDetrImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self )-> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__UpperCamelCase , "image_std" ) )
self.assertTrue(hasattr(__UpperCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__UpperCamelCase , "size" ) )
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
UpperCAmelCase__ : Tuple = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCamelCase )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> str:
pass
def lowerCAmelCase__ ( self )-> List[Any]:
# Initialize image_processing
UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
UpperCAmelCase__ : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
UpperCAmelCase__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ : Optional[int] = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self )-> List[Any]:
# Initialize image_processing
UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
UpperCAmelCase__ : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ : Union[str, Any] = image_processing(__UpperCamelCase , return_tensors="pt" ).pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase__ ( self )-> str:
# prepare image and target
UpperCAmelCase__ : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
UpperCAmelCase__ : Optional[Any] = json.loads(f.read() )
UpperCAmelCase__ : Union[str, Any] = {"image_id": 3_97_69, "annotations": target}
# encode them
UpperCAmelCase__ : Any = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
UpperCAmelCase__ : List[Any] = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors="pt" )
# verify pixel values
UpperCAmelCase__ : List[str] = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __UpperCamelCase )
UpperCAmelCase__ : Tuple = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
UpperCAmelCase__ : Optional[Any] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __UpperCamelCase ) )
# verify boxes
UpperCAmelCase__ : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
UpperCAmelCase__ : Union[str, Any] = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __UpperCamelCase ) )
# verify is_crowd
UpperCAmelCase__ : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __UpperCamelCase ) )
# verify class_labels
UpperCAmelCase__ : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __UpperCamelCase ) )
# verify orig_size
UpperCAmelCase__ : Any = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __UpperCamelCase ) )
# verify size
UpperCAmelCase__ : Tuple = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __UpperCamelCase ) )
@slow
def lowerCAmelCase__ ( self )-> Tuple:
# prepare image, target and masks_path
UpperCAmelCase__ : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
UpperCAmelCase__ : Dict = json.loads(f.read() )
UpperCAmelCase__ : Tuple = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
UpperCAmelCase__ : Optional[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
UpperCAmelCase__ : str = ConditionalDetrImageProcessor(format="coco_panoptic" )
UpperCAmelCase__ : List[Any] = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors="pt" )
# verify pixel values
UpperCAmelCase__ : Optional[int] = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , __UpperCamelCase )
UpperCAmelCase__ : Any = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
UpperCAmelCase__ : str = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __UpperCamelCase ) )
# verify boxes
UpperCAmelCase__ : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __UpperCamelCase )
UpperCAmelCase__ : List[str] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
UpperCAmelCase__ : str = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __UpperCamelCase ) )
# verify is_crowd
UpperCAmelCase__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __UpperCamelCase ) )
# verify class_labels
UpperCAmelCase__ : List[Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __UpperCamelCase ) )
# verify masks
UpperCAmelCase__ : Optional[int] = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __UpperCamelCase )
# verify orig_size
UpperCAmelCase__ : List[str] = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __UpperCamelCase ) )
# verify size
UpperCAmelCase__ : str = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __UpperCamelCase ) )
| 660 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def a__ ( lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] ):
'''simple docstring'''
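    # Pad (or truncate) each example's tensor to `sequence_length`, respecting the padding side.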
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[int] = np.full((len(lowerCAmelCase ), sequence_length, 2) , lowerCAmelCase )
else:
UpperCAmelCase__ : Optional[Any] = np.full((len(lowerCAmelCase ), sequence_length) , lowerCAmelCase )
for i, tensor in enumerate(lowerCAmelCase ):
if padding_side == "right":
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Dict = tensor[:sequence_length]
else:
UpperCAmelCase__ : Tuple = tensor[:sequence_length]
else:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase__ : Optional[Any] = tensor[:sequence_length]
else:
UpperCAmelCase__ : int = tensor[:sequence_length]
return out_tensor.tolist()
def a__ ( lowerCAmelCase : Optional[int] ):
'''simple docstring'''
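    # ASCII symbol ranges and any Unicode character whose category starts with "P" count as punctuation.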
UpperCAmelCase__ : Tuple = ord(lowerCAmelCase )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
UpperCAmelCase__ : Optional[Any] = unicodedata.category(lowerCAmelCase )
if cat.startswith("P" ):
return True
return False
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = True
_A = None
_A = None
_A = -100
_A = "pt"
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[str]:
import torch
UpperCAmelCase__ : Optional[Any] = "label" if "label" in features[0].keys() else "labels"
UpperCAmelCase__ : Dict = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
UpperCAmelCase__ : str = self.tokenizer.pad(
__UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
if labels is None:
return batch
UpperCAmelCase__ : Optional[Any] = torch.tensor(batch["entity_ids"] ).shape[1]
UpperCAmelCase__ : int = self.tokenizer.padding_side
if padding_side == "right":
UpperCAmelCase__ : int = [
list(__UpperCamelCase ) + [self.label_pad_token_id] * (sequence_length - len(__UpperCamelCase )) for label in labels
]
else:
UpperCAmelCase__ : List[Any] = [
[self.label_pad_token_id] * (sequence_length - len(__UpperCamelCase )) + list(__UpperCamelCase ) for label in labels
]
UpperCAmelCase__ : Optional[Any] = [feature["ner_tags"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , -1 , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = [feature["original_entity_spans"] for feature in features]
UpperCAmelCase__ : int = padding_tensor(__UpperCamelCase , (-1, -1) , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = {k: torch.tensor(__UpperCamelCase , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 660 | 1 |
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 660 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def a__ ( lowerCAmelCase : List[str] ):
'''simple docstring'''
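    # Decorator: run the wrapped function and return the elapsed wall-clock time instead of its result.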
def wrapper(*lowerCAmelCase : Any , **lowerCAmelCase : Tuple ):
UpperCAmelCase__ : Optional[int] = timeit.default_timer()
UpperCAmelCase__ : int = func(*lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase__ : List[Any] = timeit.default_timer() - starttime
return delta
UpperCAmelCase__ : int = func.__name__
return wrapper
def a__ ( lowerCAmelCase : dict , lowerCAmelCase : Optional[int]=100 , lowerCAmelCase : List[str]=None ):
'''simple docstring'''
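    # Build dummy records whose values match the declared `datasets` feature types
    # (random arrays, random ints, a fixed sentence for string values).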
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Optional[Any] = seq_shapes or {}
for i in range(lowerCAmelCase ):
UpperCAmelCase__ : int = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowerCAmelCase , _ArrayXD ):
UpperCAmelCase__ : List[str] = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowerCAmelCase , datasets.Value ):
if v.dtype == "string":
UpperCAmelCase__ : Dict = "The small grey turtle was surprisingly fast when challenged."
else:
UpperCAmelCase__ : str = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(lowerCAmelCase , datasets.Sequence ):
while isinstance(lowerCAmelCase , datasets.Sequence ):
UpperCAmelCase__ : List[str] = v.feature
UpperCAmelCase__ : Optional[int] = seq_shapes[k]
UpperCAmelCase__ : Optional[int] = np.random.rand(*lowerCAmelCase ).astype(v.dtype )
UpperCAmelCase__ : Union[str, Any] = data
dummy_data.append((i, example) )
return dummy_data
def a__ ( lowerCAmelCase : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : List[str]=100 , lowerCAmelCase : Optional[int]=None ):
'''simple docstring'''
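    # Write the dummy examples to an Arrow file and load them back as a `datasets.Dataset`.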
UpperCAmelCase__ : int = generate_examples(lowerCAmelCase , num_examples=lowerCAmelCase , seq_shapes=lowerCAmelCase )
with ArrowWriter(features=lowerCAmelCase , path=lowerCAmelCase ) as writer:
for key, record in dummy_data:
UpperCAmelCase__ : List[Any] = features.encode_example(lowerCAmelCase )
writer.write(lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
F"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
UpperCAmelCase__ : Optional[int] = datasets.Dataset.from_file(filename=lowerCAmelCase , info=datasets.DatasetInfo(features=lowerCAmelCase ) )
return dataset
| 660 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
A__ : Tuple = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
A__ : List[str] = parser.parse_args()
if args.model_type == "bert":
A__ : Any = BertForMaskedLM.from_pretrained(args.model_name)
A__ : Any = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
A__ : List[str] = model.state_dict()
A__ : str = {}
for w in ["word_embeddings", "position_embeddings"]:
A__ : Tuple = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
A__ : Optional[Any] = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
A__ : Optional[int] = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
A__ : Any = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
A__ : Optional[int] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
A__ : Union[str, Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
A__ : List[Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
A__ : Optional[Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
A__ : int = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
A__ : str = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
A__ : List[str] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
A__ : List[str] = state_dict["""cls.predictions.decoder.weight"""]
A__ : Optional[Any] = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
A__ : Union[str, Any] = state_dict[f"""cls.predictions.transform.dense.{w}"""]
A__ : Tuple = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 660 |
"""simple docstring"""
from manim import *
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : str = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase__ : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Union[str, Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[Any] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : int = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = VGroup(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("CPU" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(4 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Union[str, Any] = Text("GPU" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : List[str] = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Model" , font_size=24 )
UpperCAmelCase__ : Dict = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , buff=0.5 , aligned_edge=__UpperCamelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCamelCase )
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
rect.set_stroke(__UpperCamelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCAmelCase__ : int = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__UpperCamelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__UpperCamelCase , buff=0.0 )
self.add(__UpperCamelCase )
cpu_targs.append(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Any = VGroup(*__UpperCamelCase ).arrange(__UpperCamelCase , buff=0 )
UpperCAmelCase__ : Tuple = Text("Loaded Checkpoint" , font_size=24 )
UpperCAmelCase__ : Any = Group(__UpperCamelCase , __UpperCamelCase ).arrange(__UpperCamelCase , aligned_edge=__UpperCamelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase__ : Any = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : str = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__UpperCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCAmelCase__ : Optional[Any] = MarkupText(
F"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCamelCase ) , Write(__UpperCamelCase ) )
self.play(Write(__UpperCamelCase , run_time=1 ) , Create(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(__UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = fill.copy().set_fill(__UpperCamelCase , opacity=0.7 )
target.move_to(__UpperCamelCase )
first_animations.append(GrowFromCenter(__UpperCamelCase , run_time=1 ) )
UpperCAmelCase__ : List[str] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__UpperCamelCase , run_time=1.5 ) )
self.play(*__UpperCamelCase )
self.play(*__UpperCamelCase )
self.wait()
| 660 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A__ : List[str] = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Tuple = ["""PLBartTokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Dict = [
"""PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PLBartForCausalLM""",
"""PLBartForConditionalGeneration""",
"""PLBartForSequenceClassification""",
"""PLBartModel""",
"""PLBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
A__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 660 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
A__ : Tuple = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : nn.ModuleList , lowerCAmelCase : nn.ModuleList , lowerCAmelCase : List[int] ):
'''simple docstring'''
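    # Copy the selected source layers into the destination ModuleList via a state-dict load.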
UpperCAmelCase__ : Optional[Any] = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(lowerCAmelCase ) == len(lowerCAmelCase ), F"{len(lowerCAmelCase )} != {len(lowerCAmelCase )}"
dest_layers.load_state_dict(layers_to_copy.state_dict() )
A__ : List[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
A__ : List[Any] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def a__ ( lowerCAmelCase : Dict , lowerCAmelCase : Dict ):
'''simple docstring'''
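    # Look up the hard-coded teacher layers to copy for this (teacher depth, student depth) pair;
    # fall back to the first n_student layers with a warning if no mapping exists.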
try:
UpperCAmelCase__ : Tuple = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
F" {n_student}" )
return list(range(lowerCAmelCase ) )
def a__ ( lowerCAmelCase : int , lowerCAmelCase : Tuple ):
'''simple docstring'''
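    # Pick which teacher layers supervise the student's layers (intermediate-layer distillation).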
if n_student > n_teacher:
raise ValueError(F"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}" )
elif n_teacher == n_student:
return list(range(lowerCAmelCase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def a__ ( lowerCAmelCase : Union[str, PreTrainedModel] , lowerCAmelCase : Union[str, Path] = "student" , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : List[str]=False , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : List[str] , ):
'''simple docstring'''
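    # Initialize a student seq2seq model with e encoder / d decoder layers from the teacher's config,
    # then copy selected teacher layers (and all other weights) into it.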
UpperCAmelCase__ : List[str] = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCAmelCase , lowerCAmelCase ):
AutoTokenizer.from_pretrained(lowerCAmelCase ).save_pretrained(lowerCAmelCase ) # purely for convenience
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).eval()
else:
assert isinstance(lowerCAmelCase , lowerCAmelCase ), F"teacher must be a model or string got type {type(lowerCAmelCase )}"
UpperCAmelCase__ : int = teacher.config.to_diff_dict()
try:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
UpperCAmelCase__ : Tuple = teacher_e
if d is None:
UpperCAmelCase__ : str = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
UpperCAmelCase__ : Optional[Any] = teacher_e
if d is None:
UpperCAmelCase__ : Optional[Any] = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCAmelCase )
# Copy weights
UpperCAmelCase__ : Tuple = teacher.config_class(**lowerCAmelCase )
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_config(lowerCAmelCase )
    # Start by copying the full teacher state dict; this copies the first N teacher layers to the student.
UpperCAmelCase__ : Optional[int] = student.load_state_dict(teacher.state_dict() , strict=lowerCAmelCase )
    assert info.missing_keys == [], info.missing_keys # every student key should have a teacher key.
if copy_first_teacher_layers: # Our copying is done. We just log and save
UpperCAmelCase__ , UpperCAmelCase__ : int = list(range(lowerCAmelCase ) ), list(range(lowerCAmelCase ) )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
F" {save_path}" )
student.save_pretrained(lowerCAmelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
if d_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
try:
if hasattr(
lowerCAmelCase , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCAmelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCAmelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCAmelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCAmelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCAmelCase )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCAmelCase )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
UpperCAmelCase__ : int = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(lowerCAmelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 660 | 1 |
"""simple docstring"""
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def a__ ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
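    # Merge the LoRA up/down weight pairs from the safetensors state dict directly into the matching
    # text-encoder / UNet layers of the base pipeline, scaled by alpha.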
# load base model
UpperCAmelCase__ : Optional[int] = StableDiffusionPipeline.from_pretrained(lowerCAmelCase , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
UpperCAmelCase__ : int = load_file(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = []
# directly update weight in diffusers model
for key in state_dict:
        # it is suggested to print out the key; it will usually look something like the example below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # alpha was set beforehand (as an argument), so the ".alpha" entries are simply skipped
if ".alpha" in key or key in visited:
continue
if "text" in key:
UpperCAmelCase__ : Optional[Any] = key.split("." )[0].split(LORA_PREFIX_TEXT_ENCODER + "_" )[-1].split("_" )
UpperCAmelCase__ : List[Any] = pipeline.text_encoder
else:
UpperCAmelCase__ : str = key.split("." )[0].split(LORA_PREFIX_UNET + "_" )[-1].split("_" )
UpperCAmelCase__ : Any = pipeline.unet
# find the target layer
UpperCAmelCase__ : Tuple = layer_infos.pop(0 )
while len(lowerCAmelCase ) > -1:
try:
UpperCAmelCase__ : List[Any] = curr_layer.__getattr__(lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
UpperCAmelCase__ : int = layer_infos.pop(0 )
elif len(lowerCAmelCase ) == 0:
break
except Exception:
if len(lowerCAmelCase ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
UpperCAmelCase__ : List[str] = layer_infos.pop(0 )
UpperCAmelCase__ : List[Any] = []
if "lora_down" in key:
pair_keys.append(key.replace("lora_down" , "lora_up" ) )
pair_keys.append(lowerCAmelCase )
else:
pair_keys.append(lowerCAmelCase )
pair_keys.append(key.replace("lora_up" , "lora_down" ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
UpperCAmelCase__ : Dict = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
UpperCAmelCase__ : Any = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(lowerCAmelCase , lowerCAmelCase ).unsqueeze(2 ).unsqueeze(3 )
else:
UpperCAmelCase__ : Any = state_dict[pair_keys[0]].to(torch.floataa )
UpperCAmelCase__ : List[Any] = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(lowerCAmelCase , lowerCAmelCase )
# update visited list
for item in pair_keys:
visited.append(lowerCAmelCase )
return pipeline
if __name__ == "__main__":
A__ : int = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
A__ : Union[str, Any] = parser.parse_args()
A__ : int = args.base_model_path
A__ : str = args.checkpoint_path
A__ : int = args.dump_path
A__ : Optional[int] = args.lora_prefix_unet
A__ : Optional[int] = args.lora_prefix_text_encoder
A__ : Optional[int] = args.alpha
A__ : int = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
A__ : List[str] = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 660 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowercase ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self )-> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Tuple = ort.SessionOptions()
UpperCAmelCase__ : List[str] = False
return options
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : str = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : Tuple = np.random.RandomState(0 )
UpperCAmelCase__ : Any = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : Tuple = output.images
UpperCAmelCase__ : Dict = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : Union[str, Any] = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
UpperCAmelCase__ : Optional[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : List[str] = np.random.RandomState(0 )
UpperCAmelCase__ : str = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : List[str] = output.images
UpperCAmelCase__ : List[Any] = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : int = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 660 | 1 |
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = (EulerDiscreteScheduler,)
_A = 10
def lowerCAmelCase__ ( self , **__UpperCamelCase )-> Any:
UpperCAmelCase__ : Any = {
"num_train_timesteps": 11_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**__UpperCamelCase )
return config
def lowerCAmelCase__ ( self )-> str:
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=__UpperCamelCase , beta_end=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> str:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : Optional[int] = self.scheduler_classes[0]
UpperCAmelCase__ : List[Any] = self.get_scheduler_config()
UpperCAmelCase__ : str = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase__ : int = torch.manual_seed(0 )
UpperCAmelCase__ : List[Any] = self.dummy_model()
UpperCAmelCase__ : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase__ : List[Any] = sample.to(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = model(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Any = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase )
UpperCAmelCase__ : int = output.prev_sample
UpperCAmelCase__ : str = torch.sum(torch.abs(__UpperCamelCase ) )
UpperCAmelCase__ : List[Any] = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Optional[Any] = self.scheduler_classes[0]
UpperCAmelCase__ : Optional[Any] = self.get_scheduler_config(prediction_type="v_prediction" )
UpperCAmelCase__ : Union[str, Any] = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase__ : Tuple = torch.manual_seed(0 )
UpperCAmelCase__ : Optional[int] = self.dummy_model()
UpperCAmelCase__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase__ : Union[str, Any] = sample.to(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase__ : List[str] = scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : str = model(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[int] = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase )
UpperCAmelCase__ : Dict = output.prev_sample
UpperCAmelCase__ : Tuple = torch.sum(torch.abs(__UpperCamelCase ) )
UpperCAmelCase__ : Any = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 0.0002 ) < 1E-2
assert abs(result_mean.item() - 2.26_76E-06 ) < 1E-3
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Optional[int] = self.scheduler_classes[0]
UpperCAmelCase__ : str = self.get_scheduler_config()
UpperCAmelCase__ : Optional[int] = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = self.dummy_model()
UpperCAmelCase__ : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
UpperCAmelCase__ : List[Any] = sample.to(__UpperCamelCase )
for t in scheduler.timesteps:
UpperCAmelCase__ : Tuple = scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Tuple = model(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase )
UpperCAmelCase__ : int = output.prev_sample
UpperCAmelCase__ : int = torch.sum(torch.abs(__UpperCamelCase ) )
UpperCAmelCase__ : Dict = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Dict = self.scheduler_classes[0]
UpperCAmelCase__ : Any = self.get_scheduler_config()
UpperCAmelCase__ : str = scheduler_class(**__UpperCamelCase , use_karras_sigmas=__UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase__ : List[str] = self.dummy_model()
UpperCAmelCase__ : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
UpperCAmelCase__ : Any = sample.to(__UpperCamelCase )
for t in scheduler.timesteps:
UpperCAmelCase__ : str = scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = model(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : str = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase )
UpperCAmelCase__ : List[str] = output.prev_sample
UpperCAmelCase__ : Optional[int] = torch.sum(torch.abs(__UpperCamelCase ) )
UpperCAmelCase__ : int = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1E-2
assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1E-3
| 660 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : Optional[int] = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 'table-transformer'
_A = ['past_key_values']
_A = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=3 , __UpperCamelCase=1_00 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=6 , __UpperCamelCase=20_48 , __UpperCamelCase=8 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=True , __UpperCamelCase="relu" , __UpperCamelCase=2_56 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.02 , __UpperCamelCase=1.0 , __UpperCamelCase=False , __UpperCamelCase="sine" , __UpperCamelCase="resnet50" , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=0.1 , **__UpperCamelCase , )-> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase__ : Any = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : int = backbone_config.get("model_type" )
UpperCAmelCase__ : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase__ : int = config_class.from_dict(__UpperCamelCase )
# set timm attributes to None
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = None, None, None
UpperCAmelCase__ : Optional[int] = use_timm_backbone
UpperCAmelCase__ : Dict = backbone_config
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Any = num_queries
UpperCAmelCase__ : int = d_model
UpperCAmelCase__ : Optional[int] = encoder_ffn_dim
UpperCAmelCase__ : str = encoder_layers
UpperCAmelCase__ : Dict = encoder_attention_heads
UpperCAmelCase__ : Optional[Any] = decoder_ffn_dim
UpperCAmelCase__ : Tuple = decoder_layers
UpperCAmelCase__ : Optional[Any] = decoder_attention_heads
UpperCAmelCase__ : List[str] = dropout
UpperCAmelCase__ : Tuple = attention_dropout
UpperCAmelCase__ : List[Any] = activation_dropout
UpperCAmelCase__ : Dict = activation_function
UpperCAmelCase__ : Optional[Any] = init_std
UpperCAmelCase__ : List[str] = init_xavier_std
UpperCAmelCase__ : int = encoder_layerdrop
UpperCAmelCase__ : Tuple = decoder_layerdrop
UpperCAmelCase__ : int = encoder_layers
UpperCAmelCase__ : Dict = auxiliary_loss
UpperCAmelCase__ : Union[str, Any] = position_embedding_type
UpperCAmelCase__ : List[str] = backbone
UpperCAmelCase__ : List[Any] = use_pretrained_backbone
UpperCAmelCase__ : List[str] = dilation
# Hungarian matcher
UpperCAmelCase__ : Dict = class_cost
UpperCAmelCase__ : Any = bbox_cost
UpperCAmelCase__ : Tuple = giou_cost
# Loss coefficients
UpperCAmelCase__ : Any = mask_loss_coefficient
UpperCAmelCase__ : Dict = dice_loss_coefficient
UpperCAmelCase__ : Any = bbox_loss_coefficient
UpperCAmelCase__ : Tuple = giou_loss_coefficient
UpperCAmelCase__ : List[Any] = eos_coefficient
super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase )
@property
def lowerCAmelCase__ ( self )-> int:
return self.encoder_attention_heads
@property
def lowerCAmelCase__ ( self )-> int:
return self.d_model
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = version.parse('1.11' )
@property
def lowerCAmelCase__ ( self )-> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def lowerCAmelCase__ ( self )-> float:
return 1E-5
@property
def lowerCAmelCase__ ( self )-> int:
return 12
| 660 | 1 |
"""simple docstring"""
def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ):
'''simple docstring'''
_validate_point(lowerCAmelCase )
_validate_point(lowerCAmelCase )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(lowerCAmelCase , lowerCAmelCase ) ) )
def a__ ( lowerCAmelCase : list[float] ):
'''simple docstring'''
if point:
        if isinstance(lowerCAmelCase , list ):
for item in point:
                if not isinstance(item , (int, float) ):
                    UpperCAmelCase__ : Tuple = (
                        "Expected a list of numbers as input, found "
                        F"{type(item ).__name__}"
)
raise TypeError(lowerCAmelCase )
else:
UpperCAmelCase__ : Dict = F"Expected a list of numbers as input, found {type(lowerCAmelCase ).__name__}"
raise TypeError(lowerCAmelCase )
else:
raise ValueError("Missing an input" )
def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ):
'''simple docstring'''
_validate_point(lowerCAmelCase )
_validate_point(lowerCAmelCase )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(lowerCAmelCase , lowerCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
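# Both helpers above compute the Manhattan (L1) distance; they differ only in style. A minimal
# runnable sketch of the same idea with descriptive, illustrative names (not the names used in
# this file):
def manhattan_distance_sketch(point_a: list, point_b: list) -> float:
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


# Example: |1 - 4| + |2 - (-2)| + |3 - 5| = 3 + 4 + 2 = 9
assert manhattan_distance_sketch([1, 2, 3], [4, -2, 5]) == 9.0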
| 660 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
A__ : int = getLogger(__name__)
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : int = 8 , lowerCAmelCase : int = 1024 , lowerCAmelCase : List[Any]="val" , lowerCAmelCase : str=None , lowerCAmelCase : int=False , lowerCAmelCase : Dict="summarization" , lowerCAmelCase : int=None , lowerCAmelCase : List[str]=1 , lowerCAmelCase : Dict = None , lowerCAmelCase : List[str]="" , **lowerCAmelCase : int , ):
'''simple docstring'''
UpperCAmelCase__ : Dict = str(lowerCAmelCase )
assert local_rank is not None
torch.distributed.init_process_group(backend="nccl" , rank=lowerCAmelCase )
UpperCAmelCase__ : List[str] = Path(lowerCAmelCase )
UpperCAmelCase__ : str = save_dir.joinpath(F"rank_{local_rank}_output.json" )
torch.cuda.set_device(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).cuda()
if fpaa:
UpperCAmelCase__ : List[Any] = model.half()
# determine if we need to increase num_beams
use_task_specific_params(lowerCAmelCase , lowerCAmelCase ) # update config with task specific params
UpperCAmelCase__ : List[Any] = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
UpperCAmelCase__ : Any = num_return_sequences
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
if max_source_length is None:
UpperCAmelCase__ : int = tokenizer.model_max_length
if prefix is None:
UpperCAmelCase__ : Union[str, Any] = prefix or getattr(model.config , "prefix" , "" ) or ""
UpperCAmelCase__ : str = SeqaSeqDataset(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , max_target_length=1024 , type_path=lowerCAmelCase , n_obs=lowerCAmelCase , prefix=lowerCAmelCase , **lowerCAmelCase , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
UpperCAmelCase__ : Union[str, Any] = ds.make_sortish_sampler(lowerCAmelCase , distributed=lowerCAmelCase , add_extra_examples=lowerCAmelCase , shuffle=lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = DataLoader(lowerCAmelCase , sampler=lowerCAmelCase , batch_size=lowerCAmelCase , collate_fn=ds.collate_fn )
UpperCAmelCase__ : str = []
for batch in tqdm(lowerCAmelCase ):
UpperCAmelCase__ : Dict = model.generate(
input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=lowerCAmelCase , num_beams=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase__ : int = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase )
UpperCAmelCase__ : int = batch["ids"]
if num_return_sequences > 1:
UpperCAmelCase__ : str = chunks(lowerCAmelCase , lowerCAmelCase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(lowerCAmelCase ):
results.append({"pred": pred, "id": ids[i].item()} )
save_json(lowerCAmelCase , lowerCAmelCase )
return results, sampler.num_replicas
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : str = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" , type=lowerCAmelCase , help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" , type=lowerCAmelCase , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
parser.add_argument("--save_dir" , type=lowerCAmelCase , help="where to save" , default="tmp_gen" )
parser.add_argument("--max_source_length" , type=lowerCAmelCase , default=lowerCAmelCase )
    parser.add_argument(
        "--type_path" , type=lowerCAmelCase , default="test" , help="which subset to evaluate, typically train/val/test" )
parser.add_argument("--task" , type=lowerCAmelCase , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=lowerCAmelCase , default=8 , required=lowerCAmelCase , help="batch size" )
parser.add_argument(
"--local_rank" , type=lowerCAmelCase , default=-1 , required=lowerCAmelCase , help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase , help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" , type=lowerCAmelCase , default=1 , required=lowerCAmelCase , help="How many sequences to return" )
    parser.add_argument(
        "--sync_timeout" , type=lowerCAmelCase , default=600 , required=lowerCAmelCase , help="How long the master process should wait for other processes to finish." , )
parser.add_argument("--src_lang" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
parser.add_argument("--tgt_lang" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
    parser.add_argument(
        "--prefix" , type=lowerCAmelCase , required=lowerCAmelCase , default=lowerCAmelCase , help="will be added to the beginning of src examples" )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--debug" , action="store_true" )
UpperCAmelCase__ : Optional[int] = time.time()
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = parser.parse_known_args()
UpperCAmelCase__ : int = parse_numeric_n_bool_cl_kwargs(lowerCAmelCase )
if generate_kwargs and args.local_rank <= 0:
print(F"parsed the following generate kwargs: {generate_kwargs}" )
UpperCAmelCase__ : Dict = Path(args.save_dir + "_tmp" )
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase ) # this handles locking.
UpperCAmelCase__ : List[str] = list(json_save_dir.glob("rank_*.json" ) )
if intermediate_files:
raise ValueError(F"Found files at {json_save_dir} please move or remove them." )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
UpperCAmelCase__ : List[str] = {}
if args.src_lang is not None:
UpperCAmelCase__ : str = args.src_lang
if args.tgt_lang is not None:
UpperCAmelCase__ : List[str] = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = eval_data_dir(
args.data_dir , lowerCAmelCase , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=lowerCAmelCase , **lowerCAmelCase , )
if args.local_rank <= 0:
UpperCAmelCase__ : str = Path(args.save_dir )
save_dir.mkdir(exist_ok=lowerCAmelCase )
UpperCAmelCase__ : Tuple = gather_results_from_each_node(lowerCAmelCase , lowerCAmelCase , args.sync_timeout )
UpperCAmelCase__ : Union[str, Any] = combine_partial_results(lowerCAmelCase )
if args.num_return_sequences > 1:
UpperCAmelCase__ : int = save_dir.joinpath("pseudolabel_results.json" )
print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
save_json(lowerCAmelCase , lowerCAmelCase )
return
UpperCAmelCase__ : Optional[Any] = Path(args.data_dir ).joinpath(args.type_path + ".target" )
with open(lowerCAmelCase ) as f:
UpperCAmelCase__ : Optional[int] = [x.rstrip() for x in f.readlines()][: len(lowerCAmelCase )]
# Calculate metrics, save metrics, and save _generations.txt
UpperCAmelCase__ : List[Any] = "translation" in args.task
UpperCAmelCase__ : Optional[Any] = calculate_bleu if calc_bleu else calculate_rouge
UpperCAmelCase__ : Optional[Any] = "bleu" if calc_bleu else "rouge"
UpperCAmelCase__ : Dict = score_fn(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : List[Any] = len(lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = time.time() - start_time
UpperCAmelCase__ : Optional[int] = round(runtime / metrics["n_obs"] , 4 )
UpperCAmelCase__ : Tuple = num_replicas
# TODO(@stas00): add whatever metadata to metrics
UpperCAmelCase__ : Any = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" )
save_json(lowerCAmelCase , lowerCAmelCase , indent=lowerCAmelCase )
print(lowerCAmelCase )
write_txt_file(lowerCAmelCase , save_dir.joinpath(F"{args.type_path}_generations.txt" ) )
if args.debug:
write_txt_file(lowerCAmelCase , save_dir.joinpath(F"{args.type_path}.target" ) )
else:
shutil.rmtree(lowerCAmelCase )
def a__ ( lowerCAmelCase : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : str = []
for partial_result in partial_results:
records.extend(lowerCAmelCase )
UpperCAmelCase__ : Dict = sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x["id"] )
UpperCAmelCase__ : List[str] = [x["pred"] for x in records]
return preds
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
'''simple docstring'''
    # wait for every rank to write its rank_*.json file
UpperCAmelCase__ : int = time.time()
logger.info("waiting for all nodes to finish" )
UpperCAmelCase__ : Dict = None
while (time.time() - start_wait) < timeout:
UpperCAmelCase__ : str = list(save_dir.glob("rank_*.json" ) )
if len(lowerCAmelCase ) < num_replicas:
continue
try:
# make sure all json files are fully saved
UpperCAmelCase__ : Union[str, Any] = lmap(lowerCAmelCase , lowerCAmelCase )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
    # Usage for MT: see the example invocation sketched after this block.
run_generate()
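# A hypothetical invocation of this script under torch.distributed (the script filename, model
# name and paths below are placeholders, not taken from this file):
#
#   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 \
#       --data_dir cnn_dm \
#       --save_dir tmp_gen \
#       --type_path test \
#       --bs 8 --fp16
#
# Each rank writes rank_<local_rank>_output.json into <save_dir>_tmp; rank 0 then waits (up to
# --sync_timeout seconds), merges the per-rank predictions, computes ROUGE (or BLEU when the
# task contains "translation") and writes <type_path>_generations.txt plus a metrics JSON into
# --save_dir.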
| 660 | 1 |
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
A__ : Union[str, Any] = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json"""
with io.open(filename, """r""", encoding="""utf-8""") as f:
A__ : List[str] = json.load(f)
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Dict:
return FSMTTokenizer.from_pretrained(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : Tuple = FSMTForConditionalGeneration.from_pretrained(__UpperCamelCase ).to(__UpperCamelCase )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
UpperCAmelCase__ : Optional[Any] = F"facebook/wmt19-{pair}"
UpperCAmelCase__ : List[Any] = self.get_tokenizer(__UpperCamelCase )
UpperCAmelCase__ : str = self.get_model(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = bleu_data[pair]["src"]
UpperCAmelCase__ : List[str] = bleu_data[pair]["tgt"]
UpperCAmelCase__ : int = tokenizer(__UpperCamelCase , return_tensors="pt" , truncation=__UpperCamelCase , padding="longest" ).to(__UpperCamelCase )
UpperCAmelCase__ : List[str] = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
UpperCAmelCase__ : Union[str, Any] = tokenizer.batch_decode(
__UpperCamelCase , skip_special_tokens=__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase )
UpperCAmelCase__ : Any = calculate_bleu(__UpperCamelCase , __UpperCamelCase )
print(__UpperCamelCase )
self.assertGreaterEqual(scores["bleu"] , __UpperCamelCase )
| 660 |
"""simple docstring"""
from timeit import timeit
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCAmelCase__ : Tuple = 0
while number:
number &= number - 1
result += 1
return result
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCAmelCase__ : Union[str, Any] = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def a__ ( ):
'''simple docstring'''
def do_benchmark(lowerCAmelCase : int ) -> None:
UpperCAmelCase__ : Dict = "import __main__ as z"
print(F"Benchmark when {number = }:" )
print(F"{get_set_bits_count_using_modulo_operator(lowerCAmelCase ) = }" )
        UpperCAmelCase__ : Tuple = timeit(F"z.get_set_bits_count_using_modulo_operator({lowerCAmelCase})" , setup=lowerCAmelCase )
print(F"timeit() runs in {timing} seconds" )
print(F"{get_set_bits_count_using_brian_kernighans_algorithm(lowerCAmelCase ) = }" )
        UpperCAmelCase__ : Any = timeit(
            F"z.get_set_bits_count_using_brian_kernighans_algorithm({lowerCAmelCase})" , setup=lowerCAmelCase , )
print(F"timeit() runs in {timing} seconds" )
for number in (25, 37, 58, 0):
do_benchmark(lowerCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
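# Both counters above compute the population count: the first clears the lowest set bit per
# iteration (Brian Kernighan's trick, one pass per set bit), the second inspects every bit.
# A self-contained illustrative sketch of the Kernighan variant (names are illustrative):
def popcount_kernighan(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    count = 0
    while number:
        number &= number - 1  # drop the lowest set bit
        count += 1
    return count


# 25 == 0b11001 has three set bits, 0 has none
assert popcount_kernighan(25) == 3
assert popcount_kernighan(0) == 0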
| 660 | 1 |
"""simple docstring"""
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
assert (
isinstance(lowerCAmelCase , lowerCAmelCase ) and number_of_steps > 0
), F"number_of_steps needs to be positive integer, your input {number_of_steps}"
if number_of_steps == 1:
return 1
UpperCAmelCase__ , UpperCAmelCase__ : str = 1, 1
for _ in range(number_of_steps - 1 ):
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
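# The loop above is the iterative "climbing stairs" recurrence: ways(n) = ways(n - 1) + ways(n - 2)
# with ways(1) = 1 and ways(2) = 2, i.e. the number of distinct ways to climb n steps taking one
# or two at a time. An illustrative standalone sketch and a quick check of the first values:
def climb_stairs_sketch(number_of_steps: int) -> int:
    current, previous = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current


assert [climb_stairs_sketch(n) for n in range(1, 6)] == [1, 2, 3, 5, 8]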
| 660 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _lowercase ( unittest.TestCase , lowerCAmelCase_ ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Optional[Any] = load_tool("text-classification" )
self.tool.setup()
UpperCAmelCase__ : List[str] = load_tool("text-classification" , remote=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Dict = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : List[Any] = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Any = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : str = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
| 660 | 1 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = (DDIMParallelScheduler,)
_A = (('eta', 0.0), ('num_inference_steps', 50))
def lowerCAmelCase__ ( self , **__UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : List[Any] = {
"num_train_timesteps": 10_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**__UpperCamelCase )
return config
def lowerCAmelCase__ ( self , **__UpperCamelCase )-> int:
UpperCAmelCase__ : Optional[Any] = self.scheduler_classes[0]
UpperCAmelCase__ : Union[str, Any] = self.get_scheduler_config(**__UpperCamelCase )
UpperCAmelCase__ : List[str] = scheduler_class(**__UpperCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = 10, 0.0
UpperCAmelCase__ : Optional[int] = self.dummy_model()
UpperCAmelCase__ : int = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCamelCase )
for t in scheduler.timesteps:
UpperCAmelCase__ : List[Any] = model(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Any = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
return sample
def lowerCAmelCase__ ( self )-> str:
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> int:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__UpperCamelCase )
UpperCAmelCase__ : List[str] = self.scheduler_classes[0]
UpperCAmelCase__ : int = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase__ : Optional[Any] = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )
def lowerCAmelCase__ ( self )-> List[Any]:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__UpperCamelCase , beta_end=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Dict:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> List[Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Optional[int]:
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Tuple:
self.check_over_configs(thresholding=__UpperCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , )
def lowerCAmelCase__ ( self )-> Optional[Any]:
for t in [1, 10, 49]:
self.check_over_forward(time_step=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ):
self.check_over_forward(time_step=__UpperCamelCase , num_inference_steps=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Optional[Any]:
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=__UpperCamelCase , eta=__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Optional[int] = self.scheduler_classes[0]
UpperCAmelCase__ : Dict = self.get_scheduler_config()
UpperCAmelCase__ : List[str] = scheduler_class(**__UpperCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.1_4771 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.3_2460 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1E-5
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Optional[Any] = self.scheduler_classes[0]
UpperCAmelCase__ : List[str] = self.get_scheduler_config()
UpperCAmelCase__ : str = scheduler_class(**__UpperCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Dict = 10, 0.0
scheduler.set_timesteps(__UpperCamelCase )
UpperCAmelCase__ : str = self.dummy_model()
UpperCAmelCase__ : Tuple = self.dummy_sample_deter
UpperCAmelCase__ : Dict = self.dummy_sample_deter + 0.1
UpperCAmelCase__ : Optional[Any] = self.dummy_sample_deter - 0.1
UpperCAmelCase__ : Dict = samplea.shape[0]
UpperCAmelCase__ : Optional[int] = torch.stack([samplea, samplea, samplea] , dim=0 )
UpperCAmelCase__ : Any = torch.arange(__UpperCamelCase )[0:3, None].repeat(1 , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
UpperCAmelCase__ : Tuple = scheduler.batch_step_no_noise(__UpperCamelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , __UpperCamelCase )
UpperCAmelCase__ : Tuple = torch.sum(torch.abs(__UpperCamelCase ) )
UpperCAmelCase__ : Union[str, Any] = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 1147.7904 ) < 1E-2
assert abs(result_mean.item() - 0.4982 ) < 1E-3
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Tuple = self.full_loop()
UpperCAmelCase__ : Tuple = torch.sum(torch.abs(__UpperCamelCase ) )
UpperCAmelCase__ : Any = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 172.0067 ) < 1E-2
assert abs(result_mean.item() - 0.22_3967 ) < 1E-3
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : str = self.full_loop(prediction_type="v_prediction" )
UpperCAmelCase__ : List[str] = torch.sum(torch.abs(__UpperCamelCase ) )
UpperCAmelCase__ : List[Any] = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 52.5302 ) < 1E-2
assert abs(result_mean.item() - 0.0684 ) < 1E-3
def lowerCAmelCase__ ( self )-> List[Any]:
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase__ : Tuple = self.full_loop(set_alpha_to_one=__UpperCamelCase , beta_start=0.01 )
UpperCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(__UpperCamelCase ) )
UpperCAmelCase__ : Dict = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 149.8295 ) < 1E-2
assert abs(result_mean.item() - 0.1951 ) < 1E-3
def lowerCAmelCase__ ( self )-> Tuple:
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase__ : str = self.full_loop(set_alpha_to_one=__UpperCamelCase , beta_start=0.01 )
UpperCAmelCase__ : Optional[int] = torch.sum(torch.abs(__UpperCamelCase ) )
UpperCAmelCase__ : int = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 149.0784 ) < 1E-2
assert abs(result_mean.item() - 0.1941 ) < 1E-3
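# Background for the hard-coded variance values checked above: in the DDIM schedulers,
# _get_variance(t, t_prev) follows eq. (16) of the DDIM paper (arXiv:2010.02502),
#     sigma_t^2 = (1 - alpha_bar_prev) / (1 - alpha_bar_t) * (1 - alpha_bar_t / alpha_bar_prev),
# and eta later scales sigma_t, so eta = 0.0 (as in full_loop above) gives the deterministic
# DDIM sampler. This is stated as context only; the exact expected numbers come from the
# scheduler implementation under test.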
| 660 |
"""simple docstring"""
def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ):
'''simple docstring'''
_validate_point(lowerCAmelCase )
_validate_point(lowerCAmelCase )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(lowerCAmelCase , lowerCAmelCase ) ) )
def a__ ( lowerCAmelCase : list[float] ):
'''simple docstring'''
if point:
        if isinstance(lowerCAmelCase , list ):
for item in point:
                if not isinstance(item , (int, float) ):
                    UpperCAmelCase__ : Tuple = (
                        "Expected a list of numbers as input, found "
                        F"{type(item ).__name__}"
)
raise TypeError(lowerCAmelCase )
else:
UpperCAmelCase__ : Dict = F"Expected a list of numbers as input, found {type(lowerCAmelCase ).__name__}"
raise TypeError(lowerCAmelCase )
else:
raise ValueError("Missing an input" )
def a__ ( lowerCAmelCase : list , lowerCAmelCase : list ):
'''simple docstring'''
_validate_point(lowerCAmelCase )
_validate_point(lowerCAmelCase )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(lowerCAmelCase , lowerCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 1 |
"""simple docstring"""
A__ : str = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
A__ : str = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
A__ : Any = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 660 |
"""simple docstring"""
import math
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a__ ( lowerCAmelCase : int = 1_0001 ):
'''simple docstring'''
try:
UpperCAmelCase__ : List[str] = int(lowerCAmelCase )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
UpperCAmelCase__ : list[int] = []
UpperCAmelCase__ : str = 2
while len(lowerCAmelCase ) < nth:
if is_prime(lowerCAmelCase ):
primes.append(lowerCAmelCase )
num += 1
else:
num += 1
return primes[len(lowerCAmelCase ) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""")
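# is_prime above relies on the fact that every prime greater than 3 is of the form 6k +/- 1,
# so after ruling out 2, 3 and their multiples it only trial-divides by 6k - 1 and 6k + 1 up to
# sqrt(n). An illustrative standalone version of that check with a small sanity test:
import math


def is_prime_6k(number: int) -> bool:
    if 1 < number < 4:  # 2 and 3 are prime
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


assert [n for n in range(2, 30) if is_prime_6k(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]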
| 660 | 1 |
"""simple docstring"""
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase )-> None:
UpperCAmelCase__ : Any = len(__UpperCamelCase )
UpperCAmelCase__ : str = [0] * len_array
if len_array > 0:
UpperCAmelCase__ : List[Any] = array[0]
for i in range(1 , __UpperCamelCase ):
UpperCAmelCase__ : Any = self.prefix_sum[i - 1] + array[i]
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def lowerCAmelCase__ ( self , __UpperCamelCase )-> bool:
UpperCAmelCase__ : str = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(__UpperCamelCase )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
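# The class above trades O(n) preprocessing for O(1) range-sum queries, and contains_sum uses
# the classic prefix-set trick: a subarray summing to target_sum exists iff some prefix sum
# minus target_sum equals an earlier prefix sum (or zero). A compact illustrative sketch of the
# range-sum part:
def range_sum_sketch(array: list, start: int, end: int) -> int:
    prefix = [0]
    for value in array:
        prefix.append(prefix[-1] + value)
    return prefix[end + 1] - prefix[start]  # inclusive range [start, end]


assert range_sum_sketch([1, 2, 3, 4], 1, 3) == 9  # 2 + 3 + 4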
| 660 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=30 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=10 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=0.6 , __UpperCamelCase=None , )-> List[Any]:
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Optional[Any] = batch_size
UpperCAmelCase__ : Any = image_size
UpperCAmelCase__ : Dict = patch_size
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Union[str, Any] = is_training
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : int = mask_ratio
UpperCAmelCase__ : Tuple = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCAmelCase__ : int = (image_size // patch_size) ** 2
UpperCAmelCase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self )-> int:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = ViTMAEModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
UpperCAmelCase__ : List[Any] = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : Optional[int] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCAmelCase__ : List[str] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase__ : Dict = 1
UpperCAmelCase__ : str = ViTMAEForPreTraining(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCAmelCase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : List[str] = model(__UpperCamelCase )
UpperCAmelCase__ : List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = config_and_inputs
UpperCAmelCase__ : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_A = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_A = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
_A = False
_A = False
_A = False
_A = False
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : Any = ViTMAEModelTester(self )
UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def lowerCAmelCase__ ( self )-> int:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def lowerCAmelCase__ ( self )-> Dict:
pass
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class(__UpperCamelCase )
UpperCAmelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Dict = [*signature.parameters.keys()]
UpperCAmelCase__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Dict:
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase__ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCAmelCase__ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase__ : str = torch.from_numpy(__UpperCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase__ : Optional[Any] = pt_noise
super().check_pt_tf_models(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[Any] = outputs[0].cpu().numpy()
UpperCAmelCase__ : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = model_class.from_pretrained(__UpperCamelCase )
model.to(__UpperCamelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
# Make sure we don't have nans
UpperCAmelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__UpperCamelCase , 1E-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> List[str]:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Any:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self )-> Optional[Any]:
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def lowerCAmelCase__ ( self )-> List[Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
pass
@slow
def lowerCAmelCase__ ( self )-> Union[str, Any]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = ViTMAEModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self )-> List[Any]:
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self )-> Optional[int]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCAmelCase__ : Any = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__UpperCamelCase )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : Optional[Any] = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase__ : List[Any] = ViTMAEConfig()
UpperCAmelCase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase__ : Optional[int] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**__UpperCamelCase , noise=torch.from_numpy(__UpperCamelCase ).to(device=__UpperCamelCase ) )
# verify the logits
UpperCAmelCase__ : str = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
UpperCAmelCase__ : List[Any] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__UpperCamelCase ) , atol=1E-4 ) )
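# Worked example of the masked sequence length used by the tester above: with the defaults
# image_size=30, patch_size=2 and mask_ratio=0.6, num_patches = (30 // 2) ** 2 = 225, so the
# encoder sees ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91 tokens (the [CLS] token included).
import math

assert math.ceil((1 - 0.6) * ((30 // 2) ** 2 + 1)) == 91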
| 660 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class _lowercase :
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 0 )-> None:
UpperCAmelCase__ , UpperCAmelCase__ : Dict = row, column
UpperCAmelCase__ : Union[str, Any] = [[default_value for c in range(__UpperCamelCase )] for r in range(__UpperCamelCase )]
def __str__( self )-> str:
UpperCAmelCase__ : List[Any] = F"Matrix consist of {self.row} rows and {self.column} columns\n"
# Make string identifier
UpperCAmelCase__ : Union[str, Any] = 0
for row_vector in self.array:
for obj in row_vector:
UpperCAmelCase__ : int = max(__UpperCamelCase , len(str(__UpperCamelCase ) ) )
UpperCAmelCase__ : Optional[Any] = F"%{max_element_length}s"
# Make string and return
def single_line(__UpperCamelCase ) -> str:
nonlocal string_format_identifier
UpperCAmelCase__ : Union[str, Any] = "["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(__UpperCamelCase ) for row_vector in self.array )
return s
def __repr__( self )-> str:
return str(self )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> bool:
if not (isinstance(__UpperCamelCase , (list, tuple) ) and len(__UpperCamelCase ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self , __UpperCamelCase )-> Any:
assert self.validate_indicies(__UpperCamelCase )
return self.array[loc[0]][loc[1]]
def __setitem__( self , __UpperCamelCase , __UpperCamelCase )-> None:
assert self.validate_indicies(__UpperCamelCase )
UpperCAmelCase__ : Dict = value
def __add__( self , __UpperCamelCase )-> Matrix:
assert isinstance(__UpperCamelCase , __UpperCamelCase )
assert self.row == another.row and self.column == another.column
# Add
UpperCAmelCase__ : Union[str, Any] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
UpperCAmelCase__ : Union[str, Any] = self[r, c] + another[r, c]
return result
def __neg__( self )-> Matrix:
UpperCAmelCase__ : Union[str, Any] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
UpperCAmelCase__ : List[Any] = -self[r, c]
return result
def __sub__( self , __UpperCamelCase )-> Matrix:
return self + (-another)
def __mul__( self , __UpperCamelCase )-> Matrix:
if isinstance(__UpperCamelCase , (int, float) ): # Scalar multiplication
UpperCAmelCase__ : Any = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
UpperCAmelCase__ : Optional[int] = self[r, c] * another
return result
elif isinstance(__UpperCamelCase , __UpperCamelCase ): # Matrix multiplication
assert self.column == another.row
UpperCAmelCase__ : int = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
UpperCAmelCase__ : List[str] = F"Unsupported type given for another ({type(__UpperCamelCase )})"
raise TypeError(__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Matrix:
UpperCAmelCase__ : Dict = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
UpperCAmelCase__ : List[str] = self[r, c]
return result
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase )-> Any:
assert isinstance(__UpperCamelCase , __UpperCamelCase ) and isinstance(__UpperCamelCase , __UpperCamelCase )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
UpperCAmelCase__ : List[str] = v.transpose()
UpperCAmelCase__ : Dict = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
            return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def a__ ( ):
'''simple docstring'''
# a^(-1)
UpperCAmelCase__ : Optional[int] = Matrix(3 , 3 , 0 )
for i in range(3 ):
UpperCAmelCase__ : List[str] = 1
print(F"a^(-1) is {ainv}" )
# u, v
UpperCAmelCase__ : str = Matrix(3 , 1 , 0 )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = 1, 2, -3
UpperCAmelCase__ : int = Matrix(3 , 1 , 0 )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = 4, -2, 5
print(F"u is {u}" )
print(F"v is {v}" )
print(F"uv^T is {u * v.transpose()}" )
# Sherman Morrison
print(F"(a + uv^T)^(-1) is {ainv.sherman_morrison(lowerCAmelCase , lowerCAmelCase )}" )
def a__ ( ):
'''simple docstring'''
import doctest
doctest.testmod()
testa()
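# The sherman_morrison method above applies the Sherman-Morrison identity with `self` playing
# the role of A^(-1):
#     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
# and returns None when the denominator 1 + v^T A^(-1) u is zero, i.e. when the rank-one update
# is singular. In the test above A^(-1) appears to be set to the 3x3 identity (ainv[i, i] = 1),
# so the expected result reduces to I - (u v^T) / (1 + v^T u) with v^T u = 4 - 4 - 15 = -15.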
| 660 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _lowercase :
'''simple docstring'''
_A = 42
# setable values
_A = 42
_A = 42
_A = None
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
return cls(common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase )
@dataclass
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
_A = [e.name for e in FlaxKarrasDiffusionSchedulers]
_A = 42
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return True
@register_to_config
def __init__( self , __UpperCamelCase = 10_00 , __UpperCamelCase = 0.0001 , __UpperCamelCase = 0.02 , __UpperCamelCase = "linear" , __UpperCamelCase = None , __UpperCamelCase = "fixed_small" , __UpperCamelCase = True , __UpperCamelCase = "epsilon" , __UpperCamelCase = jnp.floataa , )-> List[str]:
UpperCAmelCase__ : int = dtype
def lowerCAmelCase__ ( self , __UpperCamelCase = None )-> DDPMSchedulerState:
if common is None:
UpperCAmelCase__ : int = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype )
UpperCAmelCase__ : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__UpperCamelCase , init_noise_sigma=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None )-> jnp.ndarray:
return sample
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = () )-> DDPMSchedulerState:
UpperCAmelCase__ : Dict = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
UpperCAmelCase__ : Optional[int] = (jnp.arange(0 , __UpperCamelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__UpperCamelCase , timesteps=__UpperCamelCase , )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase__ : Any = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
UpperCAmelCase__ : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCAmelCase__ : Dict = jnp.clip(__UpperCamelCase , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCAmelCase__ : Tuple = jnp.log(jnp.clip(__UpperCamelCase , a_min=1E-20 ) )
elif variance_type == "fixed_large":
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCAmelCase__ : List[str] = variance
UpperCAmelCase__ : Union[str, Any] = state.common.betas[t]
UpperCAmelCase__ : Optional[int] = (predicted_variance + 1) / 2
UpperCAmelCase__ : Any = frac * max_log + (1 - frac) * min_log
return variance
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True , )-> Union[FlaxDDPMSchedulerOutput, Tuple]:
UpperCAmelCase__ : List[str] = timestep
if key is None:
UpperCAmelCase__ : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = jnp.split(__UpperCamelCase , sample.shape[1] , axis=1 )
else:
UpperCAmelCase__ : Optional[Any] = None
# 1. compute alphas, betas
UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCAmelCase__ : Union[str, Any] = 1 - alpha_prod_t
UpperCAmelCase__ : Tuple = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ : Any = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
            raise ValueError(
                F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                " or `v_prediction` for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ : List[Any] = jnp.clip(__UpperCamelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase__ : List[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
UpperCAmelCase__ : Any = jax.random.split(__UpperCamelCase , num=1 )
UpperCAmelCase__ : int = jax.random.normal(__UpperCamelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__UpperCamelCase , __UpperCamelCase , predicted_variance=__UpperCamelCase ) ** 0.5) * noise
UpperCAmelCase__ : Dict = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCAmelCase__ : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__UpperCamelCase , state=__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return add_noise_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> jnp.ndarray:
return get_velocity_common(state.common , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __len__( self )-> Tuple:
return self.config.num_train_timesteps
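# For reference, _get_variance above implements the DDPM posterior variance from Ho et al.
# (arXiv:2006.11239), eq. (7):
#     beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t
# The variance_type options then decide how that value is used: "fixed_small" clips it,
# "fixed_large" substitutes beta_t, the "*_log" variants work with its logarithm, and the
# "learned" variants blend in the variance predicted by the model.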
| 660 | 1 |
"""simple docstring"""
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def a__ ( lowerCAmelCase : int = 100 ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = 1
UpperCAmelCase__ : int = 2
for i in range(2 , max_n + 1 ):
UpperCAmelCase__ : Optional[int] = pre_numerator
UpperCAmelCase__ : Optional[Any] = 2 * i // 3 if i % 3 == 0 else 1
UpperCAmelCase__ : List[Any] = cur_numerator
UpperCAmelCase__ : str = e_cont * pre_numerator + temp
return sum_digits(lowerCAmelCase )
if __name__ == "__main__":
print(f"""{solution() = }""")
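# This is the Project Euler 65 setup: e = [2; 1, 2, 1, 1, 4, 1, 1, 6, 1, ...], and the loop
# builds the numerator of the i-th convergent with num_i = a_i * num_{i-1} + num_{i-2}, where
# a_i is 2 * i / 3 on every third term and 1 otherwise. The first convergents of e are
# 2, 3, 8/3, 11/4, 19/7, 87/32, ..., and the 10th is 1457/536, so solution(10) returns
# 1 + 4 + 5 + 7 = 17.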
| 660 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ''
_A = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> str:
super().__init__(self , **__UpperCamelCase )
UpperCAmelCase__ : int = repo_info
UpperCAmelCase__ : Optional[int] = token
UpperCAmelCase__ : Optional[Any] = None
def lowerCAmelCase__ ( self )-> Optional[Any]:
if self.dir_cache is None:
UpperCAmelCase__ : str = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCAmelCase__ : str = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"name": str(__UpperCamelCase ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , )-> List[Any]:
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
UpperCAmelCase__ : Union[str, Any] = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def lowerCAmelCase__ ( self , __UpperCamelCase , **__UpperCamelCase )-> List[str]:
self._get_dirs()
UpperCAmelCase__ : Union[str, Any] = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase )-> str:
self._get_dirs()
UpperCAmelCase__ : str = PurePosixPath(path.strip("/" ) )
UpperCAmelCase__ : Optional[Any] = {}
for p, f in self.dir_cache.items():
UpperCAmelCase__ : Optional[int] = PurePosixPath(p.strip("/" ) )
UpperCAmelCase__ : Dict = p.parent
if root == path:
UpperCAmelCase__ : Tuple = f
UpperCAmelCase__ : List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
| 660 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A__ : str = logging.get_logger(__name__)
A__ : List[Any] = {
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
_A = 'resnet'
_A = ['basic', 'bottleneck']
def __init__( self , __UpperCamelCase=3 , __UpperCamelCase=64 , __UpperCamelCase=[2_56, 5_12, 10_24, 20_48] , __UpperCamelCase=[3, 4, 6, 3] , __UpperCamelCase="bottleneck" , __UpperCamelCase="relu" , __UpperCamelCase=False , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase , )-> Union[str, Any]:
super().__init__(**__UpperCamelCase )
if layer_type not in self.layer_types:
raise ValueError(F"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
UpperCAmelCase__ : List[str] = num_channels
UpperCAmelCase__ : str = embedding_size
UpperCAmelCase__ : Optional[int] = hidden_sizes
UpperCAmelCase__ : str = depths
UpperCAmelCase__ : Optional[int] = layer_type
UpperCAmelCase__ : str = hidden_act
UpperCAmelCase__ : Tuple = downsample_in_first_stage
UpperCAmelCase__ : str = ["stem"] + [F"stage{idx}" for idx in range(1 , len(__UpperCamelCase ) + 1 )]
UpperCAmelCase__ , UpperCAmelCase__ : Any = get_aligned_output_features_output_indices(
out_features=__UpperCamelCase , out_indices=__UpperCamelCase , stage_names=self.stage_names )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = version.parse('1.11' )
@property
def lowerCAmelCase__ ( self )-> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowerCAmelCase__ ( self )-> float:
return 1E-3
| 660 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
A__ : Dict = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
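    # Normalise the input into a batch of videos, where each video is a list of frame images.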
if isinstance(lowerCAmelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowerCAmelCase , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowerCAmelCase ):
return [[videos]]
raise ValueError(F"Could not make batched video from {videos}" )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = ['pixel_values']
def __init__( self , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = 1 / 2_55 , __UpperCamelCase = True , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> None:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
UpperCAmelCase__ : List[Any] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase__ : int = get_size_dict(__UpperCamelCase , param_name="crop_size" )
UpperCAmelCase__ : Dict = do_resize
UpperCAmelCase__ : Optional[int] = size
UpperCAmelCase__ : List[Any] = do_center_crop
UpperCAmelCase__ : str = crop_size
UpperCAmelCase__ : Optional[int] = resample
UpperCAmelCase__ : int = do_rescale
UpperCAmelCase__ : Union[str, Any] = rescale_factor
UpperCAmelCase__ : Union[str, Any] = offset
UpperCAmelCase__ : Dict = do_normalize
UpperCAmelCase__ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase__ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
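        # A "shortest_edge" size keeps the aspect ratio; explicit "height"/"width" resizes to that exact shape.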
if "shortest_edge" in size:
UpperCAmelCase__ : Union[str, Any] = get_resize_output_image_size(__UpperCamelCase , size["shortest_edge"] , default_to_square=__UpperCamelCase )
elif "height" in size and "width" in size:
UpperCAmelCase__ : Any = (size["height"], size["width"])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[Any] = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(__UpperCamelCase , size=(size["height"], size["width"]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , )-> Tuple:
UpperCAmelCase__ : str = image.astype(np.floataa )
if offset:
UpperCAmelCase__ : Tuple = image - (scale / 2)
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , )-> np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase__ : Optional[Any] = to_numpy_array(__UpperCamelCase )
if do_resize:
UpperCAmelCase__ : Union[str, Any] = self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase )
if do_center_crop:
UpperCAmelCase__ : int = self.center_crop(__UpperCamelCase , size=__UpperCamelCase )
if do_rescale:
UpperCAmelCase__ : List[str] = self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase , offset=__UpperCamelCase )
if do_normalize:
UpperCAmelCase__ : List[Any] = self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase )
UpperCAmelCase__ : Dict = to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase )
return image
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , )-> PIL.Image.Image:
UpperCAmelCase__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : int = resample if resample is not None else self.resample
UpperCAmelCase__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : int = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : Optional[int] = offset if offset is not None else self.offset
UpperCAmelCase__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Dict = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : Optional[int] = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : List[str] = size if size is not None else self.size
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : Dict = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : Tuple = get_size_dict(__UpperCamelCase , param_name="crop_size" )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase__ : List[str] = make_batched(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = [
[
self._preprocess_image(
image=__UpperCamelCase , do_resize=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , do_center_crop=__UpperCamelCase , crop_size=__UpperCamelCase , do_rescale=__UpperCamelCase , rescale_factor=__UpperCamelCase , offset=__UpperCamelCase , do_normalize=__UpperCamelCase , image_mean=__UpperCamelCase , image_std=__UpperCamelCase , data_format=__UpperCamelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase__ : Dict = {"pixel_values": videos}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
| 660 | 1 |
"""simple docstring"""
def a__ ( lowerCAmelCase : str , lowerCAmelCase : list[str] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = ""
for word_or_phrase in separated:
        if not isinstance(word_or_phrase , str ):
raise Exception("join() accepts only strings to be joined" )
joined += word_or_phrase + separator
return joined.strip(lowerCAmelCase )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 660 |
"""simple docstring"""
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
    if lowerCAmelCase < 0:
        raise ValueError("Input value must be a positive integer" )
    elif not isinstance(lowerCAmelCase , int ):
        raise TypeError("Input value must be a 'int' type" )
return bin(lowerCAmelCase ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 1 |
"""simple docstring"""
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self , __UpperCamelCase , __UpperCamelCase )-> List[str]:
super().__init__()
self.register_modules(unet=__UpperCamelCase , scheduler=__UpperCamelCase )
@torch.no_grad()
def __call__( self , __UpperCamelCase = 1 , __UpperCamelCase = None , __UpperCamelCase = 50 , __UpperCamelCase = "pil" , __UpperCamelCase = True , **__UpperCamelCase , )-> Union[ImagePipelineOutput, Tuple]:
UpperCAmelCase__ : List[Any] = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=__UpperCamelCase , )
UpperCAmelCase__ : Dict = image.to(self.device )
# set step values
self.scheduler.set_timesteps(__UpperCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCAmelCase__ : List[Any] = self.unet(__UpperCamelCase , __UpperCamelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
UpperCAmelCase__ : str = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
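        # Denormalise from [-1, 1] to [0, 1] and move to channels-last numpy arrays.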
UpperCAmelCase__ : Tuple = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase__ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase__ : str = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=__UpperCamelCase ), "This is a local test"
| 660 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
A__ : Optional[Any] = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : bool , lowerCAmelCase : bool ):
'''simple docstring'''
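    # Decorator factory: run the wrapped benchmark function eagerly or as a compiled tf.function (optionally XLA), depending on the benchmark arguments.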
def run_func(lowerCAmelCase : Dict ):
@wraps(lowerCAmelCase )
def run_in_eager_mode(*lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Dict ):
return func(*lowerCAmelCase , **lowerCAmelCase )
@wraps(lowerCAmelCase )
@tf.function(experimental_compile=lowerCAmelCase )
def run_in_graph_mode(*lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[Any] ):
return func(*lowerCAmelCase , **lowerCAmelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def a__ ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int ):
'''simple docstring'''
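    # Build a (batch_size, sequence_length) tensor of random token ids drawn uniformly from the vocabulary.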
UpperCAmelCase__ : Dict = random.Random()
UpperCAmelCase__ : List[str] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(lowerCAmelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class _lowercase ( lowerCAmelCase_ ):
'''simple docstring'''
_A = 42
_A = 42
_A = "TensorFlow"
@property
def lowerCAmelCase__ ( self )-> Optional[int]:
return tf.__version__
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
# initialize GPU on separate process
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Union[str, Any] = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> float:
UpperCAmelCase__ : List[Any] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : List[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_speed(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : List[str] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Any = self._prepare_inference_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_inference )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> [Memory, Optional[MemorySummary]]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCamelCase )
UpperCAmelCase__ : Any = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
UpperCAmelCase__ : Optional[Any] = self._prepare_train_func(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return self._measure_memory(_train )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : Union[str, Any] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Optional[int] = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : str = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : Any = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : List[Any] = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Dict = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : Any = TF_MODEL_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : int = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Optional[Any] = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , training=__UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__UpperCamelCase , training=__UpperCamelCase )
UpperCAmelCase__ : Dict = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Callable[[], None]:
UpperCAmelCase__ : List[Any] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
UpperCAmelCase__ : Any = (
hasattr(__UpperCamelCase , "architectures" )
and isinstance(config.architectures , __UpperCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase__ : Any = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase__ : int = __import__("transformers" , fromlist=[model_class] )
UpperCAmelCase__ : int = getattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = model_cls(__UpperCamelCase )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
UpperCAmelCase__ : List[str] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__UpperCamelCase )
# encoder-decoder has vocab size saved differently
UpperCAmelCase__ : Union[str, Any] = config.vocab_size if hasattr(__UpperCamelCase , "vocab_size" ) else config.encoder.vocab_size
UpperCAmelCase__ : Dict = random_input_ids(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Union[str, Any] = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
UpperCAmelCase__ : Union[str, Any] = model(__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )[0]
UpperCAmelCase__ : Any = tf.gradients(__UpperCamelCase , model.trainable_variables )
return gradients
UpperCAmelCase__ : str = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCAmelCase__ ( self , __UpperCamelCase )-> float:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(__UpperCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCAmelCase__ : Optional[Any] = timeit.repeat(
__UpperCamelCase , repeat=self.args.repeat , number=10 , )
return min(__UpperCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
def lowerCAmelCase__ ( self , __UpperCamelCase )-> [Memory, MemorySummary]:
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
UpperCAmelCase__ : List[str] = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
UpperCAmelCase__ : Optional[int] = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
UpperCAmelCase__ : Any = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
UpperCAmelCase__ : Optional[int] = nvml.nvmlDeviceGetMemoryInfo(__UpperCamelCase )
UpperCAmelCase__ : str = meminfo.used
UpperCAmelCase__ : int = Memory(__UpperCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
UpperCAmelCase__ : Any = None
else:
UpperCAmelCase__ : List[Any] = measure_peak_memory_cpu(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = Memory(__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCAmelCase__ : Optional[Any] = stop_memory_tracing(__UpperCamelCase )
if memory is None:
UpperCAmelCase__ : Tuple = summary.total
else:
UpperCAmelCase__ : int = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
return "N/A", None
| 660 | 1 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
A__ : Dict = """."""
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
A__ : List[str] = [
"""Assert""",
"""AssignVariableOp""",
"""EmptyTensorList""",
"""MergeV2Checkpoints""",
"""ReadVariableOp""",
"""ResourceGather""",
"""RestoreV2""",
"""SaveV2""",
"""ShardedFilename""",
"""StatefulPartitionedCall""",
"""StaticRegexFullMatch""",
"""VarHandleOp""",
]
def a__ ( lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : List[str] ):
'''simple docstring'''
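    # Collect the ops supported by ONNX up to the requested opset, then compare them with the ops used in the SavedModel graph.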
UpperCAmelCase__ : Union[str, Any] = SavedModel()
UpperCAmelCase__ : Optional[Any] = []
with open(os.path.join(lowerCAmelCase , "utils" , "tf_ops" , "onnx.json" ) ) as f:
UpperCAmelCase__ : Optional[Any] = json.load(lowerCAmelCase )["opsets"]
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(lowerCAmelCase )] )
with open(lowerCAmelCase , "rb" ) as f:
saved_model.ParseFromString(f.read() )
UpperCAmelCase__ : Tuple = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
UpperCAmelCase__ : Optional[Any] = sorted(lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(lowerCAmelCase )
if strict and len(lowerCAmelCase ) > 0:
raise Exception(F"Found the following incompatible ops for the opset {opset}:\n" + incompatible_ops )
elif len(lowerCAmelCase ) > 0:
print(F"Found the following incompatible ops for the opset {opset}:" )
print(*lowerCAmelCase , sep="\n" )
else:
print(F"The saved model {saved_model_path} can properly be converted with ONNX." )
if __name__ == "__main__":
A__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--saved_model_path""", help="""Path of the saved model to check (the .pb file).""")
parser.add_argument(
"""--opset""", default=12, type=int, help="""The ONNX opset against which the model has to be tested."""
)
parser.add_argument(
"""--framework""", choices=["""onnx"""], default="""onnx""", help="""Frameworks against which to test the saved model."""
)
parser.add_argument(
"""--strict""", action="""store_true""", help="""Whether make the checking strict (raise errors) or not (raise warnings)"""
)
A__ : List[str] = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 660 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
A__ : List[str] = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
A__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 660 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Dict:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
UpperCAmelCase__ : int = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(__UpperCamelCase )
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Dict = "sshleifer/tiny-gpt2"
UpperCAmelCase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__UpperCamelCase , multi_process=__UpperCamelCase , )
UpperCAmelCase__ : List[str] = TensorFlowBenchmark(__UpperCamelCase )
UpperCAmelCase__ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase__ ( self )-> List[Any]:
UpperCAmelCase__ : List[str] = "sgugger/tiny-distilbert-classification"
UpperCAmelCase__ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , only_pretrain_model=__UpperCamelCase , )
UpperCAmelCase__ : int = TensorFlowBenchmark(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : List[Any] = "sshleifer/tiny-gpt2"
UpperCAmelCase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
UpperCAmelCase__ : List[str] = TensorFlowBenchmark(__UpperCamelCase )
UpperCAmelCase__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : Tuple = "sshleifer/tiny-gpt2"
UpperCAmelCase__ : Dict = AutoConfig.from_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__UpperCamelCase , multi_process=__UpperCamelCase , )
UpperCAmelCase__ : Optional[Any] = TensorFlowBenchmark(__UpperCamelCase , [config] )
UpperCAmelCase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Union[str, Any] = "sshleifer/tiny-gpt2"
UpperCAmelCase__ : Dict = AutoConfig.from_pretrained(__UpperCamelCase )
UpperCAmelCase__ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
UpperCAmelCase__ : Tuple = TensorFlowBenchmark(__UpperCamelCase , [config] )
UpperCAmelCase__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Union[str, Any] = "sshleifer/tiny-gpt2"
UpperCAmelCase__ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
UpperCAmelCase__ : Optional[Any] = TensorFlowBenchmark(__UpperCamelCase )
UpperCAmelCase__ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ : Tuple = "sshleifer/tiny-gpt2"
UpperCAmelCase__ : str = AutoConfig.from_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
UpperCAmelCase__ : Dict = TensorFlowBenchmark(__UpperCamelCase , [config] )
UpperCAmelCase__ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Optional[Any] = "patrickvonplaten/t5-tiny-random"
UpperCAmelCase__ : List[str] = AutoConfig.from_pretrained(__UpperCamelCase )
UpperCAmelCase__ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
UpperCAmelCase__ : Tuple = TensorFlowBenchmark(__UpperCamelCase , configs=[config] )
UpperCAmelCase__ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU" ) ) == 0 , "Cannot do xla on CPU." )
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ : Any = "sshleifer/tiny-gpt2"
UpperCAmelCase__ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__UpperCamelCase , multi_process=__UpperCamelCase , )
UpperCAmelCase__ : int = TensorFlowBenchmark(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Any = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__UpperCamelCase , save_to_csv=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__UpperCamelCase , "inf_time.csv" ) , inference_memory_csv_file=os.path.join(__UpperCamelCase , "inf_mem.csv" ) , env_info_csv_file=os.path.join(__UpperCamelCase , "env.csv" ) , multi_process=__UpperCamelCase , )
UpperCAmelCase__ : Optional[Any] = TensorFlowBenchmark(__UpperCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCamelCase , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , "env.csv" ) ).exists() )
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ : List[Any] = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(__UpperCamelCase ):
self.assertTrue(hasattr(__UpperCamelCase , "sequential" ) )
self.assertTrue(hasattr(__UpperCamelCase , "cumulative" ) )
self.assertTrue(hasattr(__UpperCamelCase , "current" ) )
self.assertTrue(hasattr(__UpperCamelCase , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__UpperCamelCase , "log.txt" ) , log_print=__UpperCamelCase , trace_memory_line_by_line=__UpperCamelCase , eager_mode=__UpperCamelCase , multi_process=__UpperCamelCase , )
UpperCAmelCase__ : str = TensorFlowBenchmark(__UpperCamelCase )
UpperCAmelCase__ : int = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__UpperCamelCase , "log.txt" ) ).exists() )
| 660 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _lowercase ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self , __UpperCamelCase = 7_68 , )-> Union[str, Any]:
super().__init__()
UpperCAmelCase__ : str = nn.Parameter(torch.zeros(1 , __UpperCamelCase ) )
UpperCAmelCase__ : Optional[int] = nn.Parameter(torch.ones(1 , __UpperCamelCase ) )
def lowerCAmelCase__ ( self , __UpperCamelCase = None , __UpperCamelCase = None , )-> Any:
UpperCAmelCase__ : Dict = nn.Parameter(self.mean.to(__UpperCamelCase ).to(__UpperCamelCase ) )
UpperCAmelCase__ : Any = nn.Parameter(self.std.to(__UpperCamelCase ).to(__UpperCamelCase ) )
return self
def lowerCAmelCase__ ( self , __UpperCamelCase )-> Union[str, Any]:
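        # Standardise the embeddings with the stored per-dimension mean and std (z-score normalisation).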
UpperCAmelCase__ : Dict = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowerCAmelCase__ ( self , __UpperCamelCase )-> List[Any]:
UpperCAmelCase__ : Any = (embeds * self.std) + self.mean
return embeds
| 660 | 1 |
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def a__ ( lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = XCLIPTextConfig()
# derive patch size from model name
UpperCAmelCase__ : List[Any] = model_name.find("patch" )
UpperCAmelCase__ : Optional[int] = int(model_name[start_idx + len("patch" ) : start_idx + len("patch" ) + 2] )
UpperCAmelCase__ : Optional[Any] = XCLIPVisionConfig(patch_size=lowerCAmelCase , num_frames=lowerCAmelCase )
if "large" in model_name:
UpperCAmelCase__ : Any = 768
UpperCAmelCase__ : int = 3072
UpperCAmelCase__ : Dict = 12
UpperCAmelCase__ : Optional[int] = 1024
UpperCAmelCase__ : Optional[Any] = 4096
UpperCAmelCase__ : Optional[Any] = 16
UpperCAmelCase__ : Tuple = 24
UpperCAmelCase__ : List[Any] = 768
UpperCAmelCase__ : int = 3072
if model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase__ : str = 336
UpperCAmelCase__ : int = XCLIPConfig.from_text_vision_configs(lowerCAmelCase , lowerCAmelCase )
if "large" in model_name:
UpperCAmelCase__ : Optional[Any] = 768
return config
def a__ ( lowerCAmelCase : List[Any] ):
'''simple docstring'''
# text encoder
if name == "token_embedding.weight":
UpperCAmelCase__ : Optional[Any] = name.replace("token_embedding.weight" , "text_model.embeddings.token_embedding.weight" )
if name == "positional_embedding":
UpperCAmelCase__ : Tuple = name.replace("positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "ln_1" in name:
UpperCAmelCase__ : Any = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
UpperCAmelCase__ : Dict = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
UpperCAmelCase__ : int = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
UpperCAmelCase__ : int = name.replace("c_proj" , "fc2" )
if name.startswith("transformer.resblocks" ):
UpperCAmelCase__ : List[str] = name.replace("transformer.resblocks" , "text_model.encoder.layers" )
if "attn.out_proj" in name and "message" not in name:
UpperCAmelCase__ : str = name.replace("attn.out_proj" , "self_attn.out_proj" )
if "ln_final" in name:
UpperCAmelCase__ : Tuple = name.replace("ln_final" , "text_model.final_layer_norm" )
# visual encoder
if name == "visual.class_embedding":
UpperCAmelCase__ : Optional[Any] = name.replace("visual.class_embedding" , "vision_model.embeddings.class_embedding" )
if name == "visual.positional_embedding":
UpperCAmelCase__ : Any = name.replace("visual.positional_embedding" , "vision_model.embeddings.position_embedding.weight" )
if name.startswith("visual.transformer.resblocks" ):
UpperCAmelCase__ : Optional[Any] = name.replace("visual.transformer.resblocks" , "vision_model.encoder.layers" )
if "visual.conv1" in name:
UpperCAmelCase__ : Union[str, Any] = name.replace("visual.conv1" , "vision_model.embeddings.patch_embedding" )
if "visual.ln_pre" in name:
UpperCAmelCase__ : Optional[Any] = name.replace("visual.ln_pre" , "vision_model.pre_layernorm" )
if "visual.ln_post" in name:
UpperCAmelCase__ : Union[str, Any] = name.replace("visual.ln_post" , "vision_model.post_layernorm" )
if "visual.proj" in name:
UpperCAmelCase__ : List[Any] = name.replace("visual.proj" , "visual_projection.weight" )
if "text_projection" in name:
UpperCAmelCase__ : int = name.replace("text_projection" , "text_projection.weight" )
# things on top
if "prompts_visual_proj" in name:
UpperCAmelCase__ : Tuple = name.replace("prompts_visual_proj" , "prompts_visual_projection" )
if "prompts_visual_ln" in name:
UpperCAmelCase__ : List[str] = name.replace("prompts_visual_ln" , "prompts_visual_layernorm" )
# mit
if name == "mit.positional_embedding":
UpperCAmelCase__ : Optional[int] = name.replace("positional" , "position" )
if name.startswith("mit.resblocks" ):
UpperCAmelCase__ : List[str] = name.replace("mit.resblocks" , "mit.encoder.layers" )
# prompts generator
if name.startswith("prompts_generator.norm" ):
UpperCAmelCase__ : int = name.replace("prompts_generator.norm" , "prompts_generator.layernorm" )
return name
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple ):
'''simple docstring'''
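    # Rename checkpoint keys and split fused attention in_proj weights/biases into separate q, k, v projections.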
for key in orig_state_dict.copy().keys():
UpperCAmelCase__ : Union[str, Any] = orig_state_dict.pop(lowerCAmelCase )
if "attn.in_proj" in key:
UpperCAmelCase__ : Optional[Any] = key.split("." )
if key.startswith("visual" ):
UpperCAmelCase__ : Union[str, Any] = key_split[3]
UpperCAmelCase__ : int = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
UpperCAmelCase__ : Dict = val[
:dim, :
]
UpperCAmelCase__ : Optional[int] = val[
dim : dim * 2, :
]
UpperCAmelCase__ : Any = val[
-dim:, :
]
else:
UpperCAmelCase__ : Optional[int] = val[
:dim
]
UpperCAmelCase__ : List[Any] = val[
dim : dim * 2
]
UpperCAmelCase__ : int = val[
-dim:
]
else:
if "weight" in key:
UpperCAmelCase__ : int = val[
:dim, :
]
UpperCAmelCase__ : Optional[Any] = val[
dim : dim * 2, :
]
UpperCAmelCase__ : Union[str, Any] = val[
-dim:, :
]
else:
UpperCAmelCase__ : List[Any] = val[:dim]
UpperCAmelCase__ : List[Any] = val[
dim : dim * 2
]
UpperCAmelCase__ : Tuple = val[-dim:]
elif key.startswith("mit" ):
UpperCAmelCase__ : Optional[Any] = key_split[2]
UpperCAmelCase__ : str = config.vision_config.mit_hidden_size
if "weight" in key:
UpperCAmelCase__ : Optional[Any] = val[:dim, :]
UpperCAmelCase__ : List[Any] = val[dim : dim * 2, :]
UpperCAmelCase__ : Tuple = val[-dim:, :]
else:
UpperCAmelCase__ : Optional[int] = val[:dim]
UpperCAmelCase__ : Any = val[dim : dim * 2]
UpperCAmelCase__ : Tuple = val[-dim:]
else:
UpperCAmelCase__ : Any = key_split[2]
UpperCAmelCase__ : List[str] = config.text_config.hidden_size
if "weight" in key:
UpperCAmelCase__ : List[Any] = val[:dim, :]
UpperCAmelCase__ : List[str] = val[
dim : dim * 2, :
]
UpperCAmelCase__ : List[Any] = val[-dim:, :]
else:
UpperCAmelCase__ : List[str] = val[:dim]
UpperCAmelCase__ : List[str] = val[
dim : dim * 2
]
UpperCAmelCase__ : List[Any] = val[-dim:]
else:
UpperCAmelCase__ : List[str] = rename_key(lowerCAmelCase )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
UpperCAmelCase__ : Optional[int] = val.T
UpperCAmelCase__ : List[str] = val
return orig_state_dict
def a__ ( lowerCAmelCase : List[str] ):
'''simple docstring'''
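    # Download a short "eating spaghetti" clip (stored as a numpy array of frames) from the hub to sanity-check the converted model.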
if num_frames == 8:
UpperCAmelCase__ : int = "eating_spaghetti_8_frames.npy"
elif num_frames == 16:
UpperCAmelCase__ : Optional[int] = "eating_spaghetti.npy"
elif num_frames == 32:
UpperCAmelCase__ : Optional[int] = "eating_spaghetti_32_frames.npy"
UpperCAmelCase__ : Any = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename=lowerCAmelCase , repo_type="dataset" , )
UpperCAmelCase__ : Union[str, Any] = np.load(lowerCAmelCase )
return list(lowerCAmelCase )
def a__ ( lowerCAmelCase : str , lowerCAmelCase : str=None , lowerCAmelCase : Any=False ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = {
# fully supervised kinetics-400 checkpoints
"xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
"xclip-base-patch32-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
),
"xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
"xclip-base-patch16-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
),
"xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
"xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
# fully supervised kinetics-600 checkpoints
"xclip-base-patch16-kinetics-600": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
),
"xclip-base-patch16-kinetics-600-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
),
"xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
# few shot
"xclip-base-patch16-hmdb-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
),
"xclip-base-patch16-hmdb-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
),
"xclip-base-patch16-hmdb-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
),
"xclip-base-patch16-hmdb-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
),
"xclip-base-patch16-ucf-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
),
"xclip-base-patch16-ucf-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
),
"xclip-base-patch16-ucf-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
),
"xclip-base-patch16-ucf-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
),
# zero shot
"xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
}
UpperCAmelCase__ : Dict = model_to_url[model_name]
UpperCAmelCase__ : int = 8
if "16-frames" in model_name:
UpperCAmelCase__ : Optional[Any] = 16
elif "shot" in model_name:
UpperCAmelCase__ : Union[str, Any] = 32
UpperCAmelCase__ : str = get_xclip_config(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : List[str] = XCLIPModel(lowerCAmelCase )
model.eval()
if "drive" in checkpoint_url:
UpperCAmelCase__ : int = "pytorch_model.bin"
gdown.cached_download(lowerCAmelCase , lowerCAmelCase , quiet=lowerCAmelCase )
UpperCAmelCase__ : Any = torch.load(lowerCAmelCase , map_location="cpu" )["model"]
else:
UpperCAmelCase__ : Tuple = torch.hub.load_state_dict_from_url(lowerCAmelCase )["model"]
UpperCAmelCase__ : Any = convert_state_dict(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : Tuple = XCLIPModel(lowerCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = model.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
UpperCAmelCase__ : Dict = 336 if model_name == "xclip-large-patch14-16-frames" else 224
UpperCAmelCase__ : str = VideoMAEImageProcessor(size=lowerCAmelCase )
UpperCAmelCase__ : int = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32" )
UpperCAmelCase__ : int = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32" )
UpperCAmelCase__ : Dict = XCLIPProcessor(image_processor=lowerCAmelCase , tokenizer=lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = prepare_video(lowerCAmelCase )
UpperCAmelCase__ : Dict = processor(
text=["playing sports", "eating spaghetti", "go shopping"] , videos=lowerCAmelCase , return_tensors="pt" , padding=lowerCAmelCase )
print("Shape of pixel values:" , inputs.pixel_values.shape )
with torch.no_grad():
UpperCAmelCase__ : Union[str, Any] = model(**lowerCAmelCase )
# Verify outputs
UpperCAmelCase__ : Union[str, Any] = outputs.logits_per_video
UpperCAmelCase__ : List[str] = logits_per_video.softmax(dim=1 )
print("Probs:" , lowerCAmelCase )
# kinetics-400
if model_name == "xclip-base-patch32":
UpperCAmelCase__ : Dict = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
UpperCAmelCase__ : Dict = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] )
elif model_name == "xclip-base-patch16":
UpperCAmelCase__ : int = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
UpperCAmelCase__ : Any = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] )
elif model_name == "xclip-large-patch14":
UpperCAmelCase__ : Optional[int] = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase__ : Tuple = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
UpperCAmelCase__ : Optional[int] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
UpperCAmelCase__ : str = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
UpperCAmelCase__ : List[Any] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
UpperCAmelCase__ : int = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
UpperCAmelCase__ : List[str] = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
UpperCAmelCase__ : Dict = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
UpperCAmelCase__ : Optional[int] = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
UpperCAmelCase__ : Tuple = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
UpperCAmelCase__ : Tuple = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
UpperCAmelCase__ : Any = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
UpperCAmelCase__ : Optional[int] = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
UpperCAmelCase__ : Dict = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] )
else:
raise ValueError(F"Model name {model_name} not supported" )
assert torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase )
if push_to_hub:
print("Pushing model, processor and slow tokenizer files to the hub..." )
model.push_to_hub(lowerCAmelCase , organization="nielsr" )
processor.push_to_hub(lowerCAmelCase , organization="nielsr" )
slow_tokenizer.push_to_hub(lowerCAmelCase , organization="nielsr" )
if __name__ == "__main__":
A__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
A__ : Optional[int] = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 660 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : List[Any] ):
'''simple docstring'''
# Construct model
if gpta_config_file == "":
UpperCAmelCase__ : Optional[int] = GPTaConfig()
else:
UpperCAmelCase__ : Dict = GPTaConfig.from_json_file(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = GPTaModel(lowerCAmelCase )
# Load weights from numpy
load_tf_weights_in_gpta(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save pytorch-model
UpperCAmelCase__ : Optional[int] = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
UpperCAmelCase__ : Any = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
torch.save(model.state_dict() , lowerCAmelCase )
print(F"Save configuration file to {pytorch_config_dump_path}" )
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
A__ : Optional[Any] = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 660 | 1 |
"""simple docstring"""
from timeit import timeit
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCAmelCase__ : Tuple = 0
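    # Brian Kernighan's trick: `number &= number - 1` clears the lowest set bit, so the loop runs once per set bit.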
while number:
number &= number - 1
result += 1
return result
def a__ ( lowerCAmelCase : int ):
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCAmelCase__ : Union[str, Any] = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def a__ ( ):
'''simple docstring'''
def do_benchmark(lowerCAmelCase : int ) -> None:
UpperCAmelCase__ : Dict = "import __main__ as z"
print(F"Benchmark when {number = }:" )
print(F"{get_set_bits_count_using_modulo_operator(lowerCAmelCase ) = }" )
UpperCAmelCase__ : Tuple = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=lowerCAmelCase )
print(F"timeit() runs in {timing} seconds" )
print(F"{get_set_bits_count_using_brian_kernighans_algorithm(lowerCAmelCase ) = }" )
UpperCAmelCase__ : Any = timeit(
"z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=lowerCAmelCase , )
print(F"timeit() runs in {timing} seconds" )
for number in (25, 37, 58, 0):
do_benchmark(lowerCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 660 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
A__ : Optional[int] = ["""small""", """medium""", """large"""]
A__ : Optional[int] = """lm_head.decoder.weight"""
A__ : Dict = """lm_head.weight"""
def a__ ( lowerCAmelCase : str , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = torch.load(lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = d.pop(lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
torch.save(lowerCAmelCase , os.path.join(lowerCAmelCase , lowerCAmelCase ) )
if __name__ == "__main__":
A__ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
A__ : Tuple = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
A__ : Tuple = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
A__ : str = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 660 | 1 |