"""simple docstring"""
from math import pi, sqrt
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if num <= 0:
raise ValueError("""math domain error""" )
if num > 171.5:
raise OverflowError("""math range error""" )
elif num - int(_A ) not in (0, 0.5):
raise NotImplementedError("""num must be an integer or a half-integer""" )
elif num == 0.5:
return sqrt(_A )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def __SCREAMING_SNAKE_CASE ( ):
assert gamma(0.5 ) == sqrt(_A )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase: List[Any] = 1.0
while num:
UpperCAmelCase: str = float(input("""Gamma of: """))
print(F'gamma({num}) = {gamma(num)}')
print("""\nEnter 0 to exit...""")
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def __SCREAMING_SNAKE_CASE ( ):
_lowercase : Dict = [randint(-1000 , 1000 ) for i in range(10 )]
_lowercase : Tuple = randint(-5000 , 5000 )
return (arr, r)
UpperCAmelCase: int = make_dataset()
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
for triplet in permutations(__UpperCAmelCase , 3 ):
if sum(__UpperCAmelCase ) == target:
return tuple(sorted(__UpperCAmelCase ) )
return (0, 0, 0)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
arr.sort()
_lowercase : Optional[Any] = len(__UpperCAmelCase )
for i in range(n - 1 ):
_lowercase , _lowercase : str = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def __SCREAMING_SNAKE_CASE ( ):
_lowercase : Tuple = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
_lowercase : Union[str, Any] = """
triplet_sum1(*dataset)
"""
_lowercase : Union[str, Any] = """
triplet_sum2(*dataset)
"""
_lowercase : Dict = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 )
_lowercase : Any = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 )
return (min(__UpperCAmelCase ), min(__UpperCAmelCase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase: Any = solution_times()
print(F'The time for naive implementation is {times[0]}.')
print(F'The time for optimized implementation is {times[1]}.')
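# A quick fixed-input check (computed by hand, not part of the original file);
# both solutions agree on concrete inputs, e.g.
#     triplet_sum1([13, 29, 7, 23, 5], 35) == (5, 7, 23)
#     triplet_sum2([37, 9, 19, 50, 44], 65) == (9, 19, 37)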
"""simple docstring"""
import socket
def __SCREAMING_SNAKE_CASE ( ):
_lowercase : Optional[int] = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
_lowercase : Optional[int] = socket.gethostname()
_lowercase : Dict = 12312
sock.connect((host, port) )
sock.send(B"""Hello server!""" )
with open("""Received_file""" , """wb""" ) as out_file:
print("""File opened""" )
print("""Receiving data...""" )
while True:
_lowercase : List[str] = sock.recv(1024 )
if not data:
break
out_file.write(__UpperCAmelCase )
print("""Successfully received the file""" )
sock.close()
print("""Connection closed""" )
if __name__ == "__main__":
main()
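# A minimal matching server sketch, for reference only (an assumption; the
# original repository keeps the server in a separate file). It listens on the
# same port, reads the client's greeting, and streams a local file back.
#
#     import socket
#
#     def serve(filename: str = "file_to_send.txt", port: int = 12312) -> None:
#         srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#         srv.bind((socket.gethostname(), port))
#         srv.listen(1)
#         conn, _addr = srv.accept()
#         print(conn.recv(1024))  # "Hello server!" from the client
#         with open(filename, "rb") as in_file:
#             while chunk := in_file.read(1024):
#                 conn.send(chunk)
#         conn.close()  # closing signals EOF, which ends the client's recv loop
#         srv.close()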
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ["image_processor", "tokenizer"]
SCREAMING_SNAKE_CASE_ : Optional[int] = "BlipImageProcessor"
SCREAMING_SNAKE_CASE_ : Tuple = "AutoTokenizer"
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
super().__init__(UpperCAmelCase_ ,UpperCAmelCase_ )
# add QFormer tokenizer
_lowercase : Optional[int] = qformer_tokenizer
def __call__( self ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = True ,UpperCAmelCase_ = False ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = 0 ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
if images is None and text is None:
raise ValueError("""You have to specify at least images or text.""" )
_lowercase : List[Any] = BatchFeature()
if text is not None:
_lowercase : List[str] = self.tokenizer(
text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
encoding.update(UpperCAmelCase_ )
_lowercase : Dict = self.qformer_tokenizer(
text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
_lowercase : str = qformer_text_encoding.pop("""input_ids""" )
_lowercase : int = qformer_text_encoding.pop("""attention_mask""" )
if images is not None:
_lowercase : Optional[Any] = self.image_processor(UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ )
encoding.update(UpperCAmelCase_ )
return encoding
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return self.tokenizer.batch_decode(*UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return self.tokenizer.decode(*UpperCAmelCase_ ,**UpperCAmelCase_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.tokenizer.model_input_names
_lowercase : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
if os.path.isfile(UpperCAmelCase_ ):
raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(UpperCAmelCase_ ,exist_ok=UpperCAmelCase_ )
_lowercase : Any = os.path.join(UpperCAmelCase_ ,"""qformer_tokenizer""" )
self.qformer_tokenizer.save_pretrained(UpperCAmelCase_ )
return super().save_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
_lowercase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCAmelCase_ ,subfolder="""qformer_tokenizer""" )
_lowercase : Any = cls._get_arguments_from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
args.append(UpperCAmelCase_ )
return cls(*UpperCAmelCase_ )
"""simple docstring"""
from __future__ import annotations
import queue
class UpperCamelCase :
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ):
_lowercase : Optional[int] = data
_lowercase : Dict = None
_lowercase : Optional[Any] = None
def __SCREAMING_SNAKE_CASE ( ):
print("""\n********Press N to stop entering at any point of time********\n""" )
_lowercase : Tuple = input("""Enter the value of the root node: """ ).strip().lower()
_lowercase : queue.Queue = queue.Queue()
_lowercase : int = TreeNode(int(__UpperCAmelCase ) )
q.put(__UpperCAmelCase )
while not q.empty():
_lowercase : Optional[int] = q.get()
_lowercase : int = F"""Enter the left node of {node_found.data}: """
_lowercase : Tuple = input(__UpperCAmelCase ).strip().lower() or '''n'''
if check == "n":
return tree_node
_lowercase : List[str] = TreeNode(int(__UpperCAmelCase ) )
_lowercase : Any = left_node
q.put(__UpperCAmelCase )
_lowercase : List[Any] = F"""Enter the right node of {node_found.data}: """
_lowercase : str = input(__UpperCAmelCase ).strip().lower() or '''n'''
if check == "n":
return tree_node
_lowercase : Union[str, Any] = TreeNode(int(__UpperCAmelCase ) )
_lowercase : Any = right_node
q.put(__UpperCAmelCase )
raise
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not node:
return
print(node.data , end=""",""" )
pre_order(node.left )
pre_order(node.right )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not node:
return
in_order(node.left )
print(node.data , end=""",""" )
in_order(node.right )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=""",""" )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not node:
return
_lowercase : queue.Queue = queue.Queue()
q.put(__UpperCAmelCase )
while not q.empty():
_lowercase : Any = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not node:
return
_lowercase : queue.Queue = queue.Queue()
q.put(__UpperCAmelCase )
while not q.empty():
_lowercase : Any = []
while not q.empty():
_lowercase : Tuple = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(__UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not node:
return
_lowercase : list[TreeNode] = []
_lowercase : Optional[int] = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=""",""" )
stack.append(__UpperCAmelCase )
_lowercase : Any = n.left
# end of while means current node doesn't have left child
_lowercase : Any = stack.pop()
# start to traverse its right child
_lowercase : List[str] = n.right
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not node:
return
_lowercase : list[TreeNode] = []
_lowercase : str = node
while n or stack:
while n:
stack.append(__UpperCAmelCase )
_lowercase : str = n.left
_lowercase : List[Any] = stack.pop()
print(n.data , end=""",""" )
_lowercase : List[str] = n.right
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not node:
return
_lowercase : Dict = [], []
_lowercase : int = node
stacka.append(__UpperCAmelCase )
while stacka: # to find the reversed order of post order, store it in stack2
_lowercase : Optional[Any] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(__UpperCAmelCase )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=""",""" )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase = "" , __UpperCAmelCase=50 , __UpperCAmelCase="*" ):
if not s:
return "\n" + width * char
_lowercase : Optional[int] = divmod(width - len(__UpperCAmelCase ) - 2 , 2 )
return F"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
UpperCAmelCase = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 50 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
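# Worked example: for the complete tree built from the level-order input
# 1, 2, 3, 4, 5, 6, 7 the traversals above print
#     pre_order:   1,2,4,5,3,6,7,
#     in_order:    4,2,5,1,6,3,7,
#     post_order:  4,5,2,6,7,3,1,
#     level_order: 1,2,3,4,5,6,7,
# and each iterative version matches its recursive counterpart.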
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase: Tuple = logging.get_logger(__name__)
UpperCAmelCase: List[Any] = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = "trajectory_transformer"
SCREAMING_SNAKE_CASE_ : Optional[int] = ["past_key_values"]
SCREAMING_SNAKE_CASE_ : Tuple = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self ,UpperCAmelCase_=1_00 ,UpperCAmelCase_=5 ,UpperCAmelCase_=1 ,UpperCAmelCase_=1 ,UpperCAmelCase_=2_49 ,UpperCAmelCase_=6 ,UpperCAmelCase_=17 ,UpperCAmelCase_=25 ,UpperCAmelCase_=4 ,UpperCAmelCase_=4 ,UpperCAmelCase_=1_28 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.0006 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=1 ,UpperCAmelCase_=True ,UpperCAmelCase_=1 ,UpperCAmelCase_=5_02_56 ,UpperCAmelCase_=5_02_56 ,**UpperCAmelCase_ ,):
_lowercase : Dict = vocab_size
_lowercase : List[str] = action_weight
_lowercase : int = reward_weight
_lowercase : List[Any] = value_weight
_lowercase : List[str] = max_position_embeddings
_lowercase : Any = block_size
_lowercase : Any = action_dim
_lowercase : List[str] = observation_dim
_lowercase : Union[str, Any] = transition_dim
_lowercase : str = learning_rate
_lowercase : Tuple = n_layer
_lowercase : Optional[int] = n_head
_lowercase : List[str] = n_embd
_lowercase : List[str] = embd_pdrop
_lowercase : Optional[Any] = attn_pdrop
_lowercase : List[Any] = resid_pdrop
_lowercase : str = initializer_range
_lowercase : Optional[Any] = layer_norm_eps
_lowercase : List[Any] = kaiming_initializer_range
_lowercase : List[Any] = use_cache
super().__init__(pad_token_id=UpperCAmelCase_ ,bos_token_id=UpperCAmelCase_ ,eos_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ )
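# A minimal usage sketch: the attribute_map above lets generic code read the
# GPT-style fields through their canonical aliases.
#
#     config = TrajectoryTransformerConfig(n_embd=256, n_layer=6)
#     assert config.hidden_size == 256 and config.num_hidden_layers == 6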
"""
Maximum-flow computation on a flow network using the push-relabel
(relabel-to-front) algorithm.
"""


class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override it in your subclass
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1


if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
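# Worked check for the sample graph above: the only source-to-sink path is
# 0 -> 1 -> 2 -> 3 with capacities 7, 6 and 8, so the bottleneck is 6 and the
# program prints "maximum flow is 6".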
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase: Any = logging.get_logger(__name__)
UpperCAmelCase: List[str] = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = "instructblip_vision_model"
def __init__( self ,UpperCAmelCase_=14_08 ,UpperCAmelCase_=61_44 ,UpperCAmelCase_=39 ,UpperCAmelCase_=16 ,UpperCAmelCase_=2_24 ,UpperCAmelCase_=14 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=1E-6 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=1E-10 ,UpperCAmelCase_=True ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ )
_lowercase : Optional[Any] = hidden_size
_lowercase : Tuple = intermediate_size
_lowercase : List[Any] = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[Any] = patch_size
_lowercase : Optional[Any] = image_size
_lowercase : Union[str, Any] = initializer_range
_lowercase : Optional[Any] = attention_dropout
_lowercase : List[Any] = layer_norm_eps
_lowercase : Optional[int] = hidden_act
_lowercase : Tuple = qkv_bias
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
_lowercase , _lowercase : List[Any] = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
_lowercase : int = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "instructblip_qformer"
def __init__( self ,UpperCAmelCase_=3_05_22 ,UpperCAmelCase_=7_68 ,UpperCAmelCase_=12 ,UpperCAmelCase_=12 ,UpperCAmelCase_=30_72 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=0 ,UpperCAmelCase_="absolute" ,UpperCAmelCase_=2 ,UpperCAmelCase_=14_08 ,**UpperCAmelCase_ ,):
super().__init__(pad_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : List[Any] = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : List[str] = num_attention_heads
_lowercase : Optional[Any] = hidden_act
_lowercase : int = intermediate_size
_lowercase : Union[str, Any] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : List[Any] = max_position_embeddings
_lowercase : Tuple = initializer_range
_lowercase : Optional[int] = layer_norm_eps
_lowercase : Any = position_embedding_type
_lowercase : Dict = cross_attention_frequency
_lowercase : Optional[Any] = encoder_hidden_size
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
_lowercase , _lowercase : Dict = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
_lowercase : str = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = "instructblip"
SCREAMING_SNAKE_CASE_ : List[str] = True
def __init__( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=32 ,**UpperCAmelCase_ ):
super().__init__(**UpperCAmelCase_ )
if vision_config is None:
_lowercase : str = {}
logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" )
if qformer_config is None:
_lowercase : Any = {}
logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" )
if text_config is None:
_lowercase : Optional[int] = {}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
_lowercase : int = InstructBlipVisionConfig(**UpperCAmelCase_ )
_lowercase : Optional[int] = InstructBlipQFormerConfig(**UpperCAmelCase_ )
_lowercase : Dict = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
_lowercase : str = CONFIG_MAPPING[text_model_type](**UpperCAmelCase_ )
_lowercase : str = self.text_config.tie_word_embeddings
_lowercase : Union[str, Any] = self.text_config.is_encoder_decoder
_lowercase : List[str] = num_query_tokens
_lowercase : List[str] = self.vision_config.hidden_size
_lowercase : Dict = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowercase : Union[str, Any] = 1.0
_lowercase : Dict = 0.02
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ,):
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**UpperCAmelCase_ ,)
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = copy.deepcopy(self.__dict__ )
_lowercase : int = self.vision_config.to_dict()
_lowercase : Any = self.qformer_config.to_dict()
_lowercase : Any = self.text_config.to_dict()
_lowercase : Optional[int] = self.__class__.model_type
return output
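# A hedged composition sketch: build a composite config from sub-configs.
#
#     vision = InstructBlipVisionConfig()
#     qformer = InstructBlipQFormerConfig()
#     text = CONFIG_MAPPING["opt"]()
#     config = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)
#     assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size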
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
UpperCAmelCase: int = logging.get_logger(__name__)
UpperCAmelCase: Dict = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = "perceiver"
def __init__( self ,UpperCAmelCase_=2_56 ,UpperCAmelCase_=12_80 ,UpperCAmelCase_=7_68 ,UpperCAmelCase_=1 ,UpperCAmelCase_=26 ,UpperCAmelCase_=8 ,UpperCAmelCase_=8 ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_="kv" ,UpperCAmelCase_=1 ,UpperCAmelCase_=1 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=True ,UpperCAmelCase_=2_62 ,UpperCAmelCase_=20_48 ,UpperCAmelCase_=56 ,UpperCAmelCase_=[3_68, 4_96] ,UpperCAmelCase_=16 ,UpperCAmelCase_=19_20 ,UpperCAmelCase_=16 ,UpperCAmelCase_=[1, 16, 2_24, 2_24] ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ )
_lowercase : List[str] = num_latents
_lowercase : Any = d_latents
_lowercase : str = d_model
_lowercase : Optional[int] = num_blocks
_lowercase : Optional[int] = num_self_attends_per_block
_lowercase : Optional[int] = num_self_attention_heads
_lowercase : Optional[int] = num_cross_attention_heads
_lowercase : str = qk_channels
_lowercase : Optional[int] = v_channels
_lowercase : str = cross_attention_shape_for_attention
_lowercase : int = self_attention_widening_factor
_lowercase : int = cross_attention_widening_factor
_lowercase : List[str] = hidden_act
_lowercase : List[str] = attention_probs_dropout_prob
_lowercase : Optional[int] = initializer_range
_lowercase : Optional[Any] = layer_norm_eps
_lowercase : Tuple = use_query_residual
# masked language modeling attributes
_lowercase : int = vocab_size
_lowercase : Union[str, Any] = max_position_embeddings
# image classification attributes
_lowercase : Union[str, Any] = image_size
# flow attributes
_lowercase : List[Any] = train_size
# multimodal autoencoding attributes
_lowercase : List[Any] = num_frames
_lowercase : Union[str, Any] = audio_samples_per_frame
_lowercase : List[str] = samples_per_patch
_lowercase : Tuple = output_shape
class UpperCamelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def lowerCamelCase__ ( self ):
if self.task == "multiple-choice":
_lowercase : str = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowercase : List[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
("""inputs""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
@property
def lowerCamelCase__ ( self ):
return 1E-4
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = -1 ,UpperCAmelCase_ = -1 ,UpperCAmelCase_ = -1 ,UpperCAmelCase_ = False ,UpperCAmelCase_ = None ,UpperCAmelCase_ = 3 ,UpperCAmelCase_ = 40 ,UpperCAmelCase_ = 40 ,):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowercase : Any = compute_effective_axis_dimension(
UpperCAmelCase_ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowercase : str = preprocessor.num_special_tokens_to_add(UpperCAmelCase_ )
_lowercase : int = compute_effective_axis_dimension(
UpperCAmelCase_ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=UpperCAmelCase_ )
# Generate dummy inputs according to compute batch and sequence
_lowercase : Dict = [' '.join(["""a"""] ) * seq_length] * batch_size
_lowercase : Union[str, Any] = dict(preprocessor(UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ) )
_lowercase : List[str] = inputs.pop("""input_ids""" )
return inputs
elif isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowercase : Union[str, Any] = compute_effective_axis_dimension(UpperCAmelCase_ ,fixed_dimension=OnnxConfig.default_fixed_batch )
_lowercase : Optional[int] = self._generate_dummy_images(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
_lowercase : Union[str, Any] = dict(preprocessor(images=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ) )
_lowercase : List[Any] = inputs.pop("""pixel_values""" )
return inputs
else:
raise ValueError(
"""Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""" )
"""simple docstring"""
import cva
import numpy as np
class UpperCamelCase :
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
if k in (0.04, 0.06):
_lowercase : Optional[Any] = k
_lowercase : Optional[Any] = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self ):
return str(self.k )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : List[str] = cva.imread(UpperCAmelCase_ ,0 )
_lowercase , _lowercase : Dict = img.shape
_lowercase : list[list[int]] = []
_lowercase : int = img.copy()
_lowercase : List[str] = cva.cvtColor(UpperCAmelCase_ ,cva.COLOR_GRAY2RGB )
_lowercase , _lowercase : Optional[Any] = np.gradient(UpperCAmelCase_ )
_lowercase : Optional[int] = dx**2
_lowercase : Optional[Any] = dy**2
_lowercase : Optional[Any] = dx * dy
_lowercase : List[str] = 0.04
_lowercase : Optional[Any] = self.window_size // 2
for y in range(UpperCAmelCase_ ,h - offset ):
for x in range(UpperCAmelCase_ ,w - offset ):
_lowercase : Optional[Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : Dict = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : Union[str, Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : int = (wxx * wyy) - (wxy**2)
_lowercase : Union[str, Any] = wxx + wyy
_lowercase : Union[str, Any] = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) ,0 )
color_img.itemset((y, x, 1) ,0 )
color_img.itemset((y, x, 2) ,2_55 )
return color_img, corner_list
if __name__ == "__main__":
UpperCAmelCase: Optional[int] = HarrisCorner(0.04, 3)
UpperCAmelCase , UpperCAmelCase: List[Any] = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
UpperCAmelCase: str = HfArgumentParser(InitializationArguments)
UpperCAmelCase: Tuple = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
UpperCAmelCase: Dict = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
UpperCAmelCase: List[Any] = {
"""vocab_size""": len(tokenizer),
"""scale_attn_by_inverse_layer_idx""": True,
"""reorder_and_upcast_attn""": True,
}
# Load model config (GPT-2 large in this case)
UpperCAmelCase: List[str] = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
UpperCAmelCase: Dict = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
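# A hedged invocation sketch (the script filename and argument values are
# assumptions; the flag names follow from the fields this script reads off
# `args`, since HfArgumentParser turns dataclass fields into CLI flags):
#
#     python initialize_model.py --config_name gpt2-large \
#         --tokenizer_name codeparrot/codeparrot \
#         --model_name my-codeparrot-model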
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase ( snake_case , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = LEDTokenizer
SCREAMING_SNAKE_CASE_ : List[str] = LEDTokenizerFast
SCREAMING_SNAKE_CASE_ : List[str] = True
def lowerCamelCase__ ( self ):
super().setUp()
_lowercase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_lowercase : List[Any] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) )
_lowercase : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowercase : Dict = {"""unk_token""": """<unk>"""}
_lowercase : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
_lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return "lower newer", "lower newer"
@cached_property
def lowerCamelCase__ ( self ):
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def lowerCamelCase__ ( self ):
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_lowercase : Any = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Tuple = tokenizer(UpperCAmelCase_ ,max_length=len(UpperCAmelCase_ ) ,padding=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
_lowercase : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Dict = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIn("""input_ids""" ,UpperCAmelCase_ )
self.assertIn("""attention_mask""" ,UpperCAmelCase_ )
self.assertNotIn("""labels""" ,UpperCAmelCase_ )
self.assertNotIn("""decoder_attention_mask""" ,UpperCAmelCase_ )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Dict = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Tuple = tokenizer(text_target=UpperCAmelCase_ ,max_length=32 ,padding="""max_length""" ,return_tensors="""pt""" )
self.assertEqual(32 ,targets["""input_ids"""].shape[1] )
@require_torch
def lowerCamelCase__ ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : List[Any] = tokenizer(
["""I am a small frog""" * 10_24, """I am a small frog"""] ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual(batch.input_ids.shape ,(2, 51_22) )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : List[Any] = ["""A long paragraph for summarization."""]
_lowercase : Dict = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Dict = tokenizer(UpperCAmelCase_ ,return_tensors="""pt""" )
_lowercase : List[str] = tokenizer(text_target=UpperCAmelCase_ ,return_tensors="""pt""" )
_lowercase : Union[str, Any] = inputs["""input_ids"""]
_lowercase : List[str] = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCamelCase__ ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : str = ["""Summary of the text.""", """Another summary."""]
_lowercase : Optional[int] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_lowercase : Any = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ )
_lowercase : str = [[0] * len(UpperCAmelCase_ ) for x in encoded_output["""input_ids"""]]
_lowercase : Optional[int] = tokenizer.pad(UpperCAmelCase_ )
self.assertSequenceEqual(outputs["""global_attention_mask"""] ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
pass
def lowerCamelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowercase : int = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Optional[int] = self.tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Dict = """A, <mask> AllenNLP sentence."""
_lowercase : List[Any] = tokenizer_r.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ )
_lowercase : Any = tokenizer_p.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) ,sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) ,sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) ,)
_lowercase : str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
_lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
"""simple docstring"""
import math
import random
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase = False ):
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
UpperCAmelCase: Tuple = 0.02
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : Union[str, Any] = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(__UpperCAmelCase ):
# Forward propagation
_lowercase : Union[str, Any] = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
_lowercase : Union[str, Any] = (expected / 100) - layer_a
# Error delta
_lowercase : Union[str, Any] = layer_1_error * sigmoid_function(__UpperCAmelCase , __UpperCAmelCase )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase: Optional[Any] = int(input("""Expected value: """))
UpperCAmelCase: Any = int(input("""Number of propagations: """))
print(forward_propagation(expected, number_propagations))
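# Sanity values for the sigmoid above: sigmoid_function(0.0) == 0.5, and in
# "deriv" mode the argument is an already-squashed activation, so
# sigmoid_function(0.5, True) == 0.5 * (1 - 0.5) == 0.25.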
"""simple docstring"""
import argparse
from collections import defaultdict
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : str = F"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : Any = f.readlines()
_lowercase : Optional[int] = F"""class {class_name}("""
_lowercase : List[str] = F"""{4 * " "}def {test_name}("""
_lowercase : List[Any] = F"""{8 * " "}{correct_line.split()[0]}"""
_lowercase : int = F"""{16 * " "}{correct_line.split()[0]}"""
_lowercase : str = False
_lowercase : Optional[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Any = False
_lowercase : int = 0
_lowercase : Tuple = 0
_lowercase : Union[str, Any] = []
for line in lines:
if line.startswith(__UpperCAmelCase ):
_lowercase : List[str] = True
elif in_class and line.startswith(__UpperCAmelCase ):
_lowercase : str = True
elif in_class and in_func and (line.startswith(__UpperCAmelCase ) or line.startswith(__UpperCAmelCase )):
_lowercase : Union[str, Any] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowercase : Optional[int] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowercase : Optional[Any] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"""{spaces * " "}{correct_line}""" )
_lowercase : Union[str, Any] = False
else:
new_lines.append(__UpperCAmelCase )
with open(__UpperCAmelCase , """w""" ) as f:
for line in new_lines:
f.write(__UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=None ):
if fail is not None:
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : Dict = {l.strip() for l in f.readlines()}
else:
_lowercase : int = None
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : int = f.readlines()
_lowercase : int = defaultdict(__UpperCAmelCase )
for line in correct_lines:
_lowercase , _lowercase , _lowercase , _lowercase : int = line.split(""";""" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase: List[Any] = argparse.ArgumentParser()
parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""")
parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None)
UpperCAmelCase: Any = parser.parse_args()
main(args.correct_filename, args.fail_filename)
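# Expected input format (one record per line in --correct_filename, as parsed
# by main() above; the concrete paths and names are illustrative):
#
#     tests/test_modeling_foo.py;FooModelTest;test_something;expected_slice = torch.tensor([...])
#
# When --fail_filename is given, only records whose "file::class::test" id
# appears in that file are rewritten.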
"""simple docstring"""
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class UpperCamelCase ( UpperCamelCase_ ):
"""simple docstring"""
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return 0.0
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : str = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
_lowercase : int = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : Optional[Any] = 512
_lowercase : Union[str, Any] = [1] + [0] * (size - 1)
_lowercase : Optional[int] = [filter_type.process(__lowerCAmelCase ) for item in inputs]
_lowercase : Tuple = [0] * (samplerate - size) # zero-padding
outputs += filler
_lowercase : str = np.abs(np.fft.fft(__lowerCAmelCase ) )
_lowercase : str = 20 * np.logaa(__lowerCAmelCase )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("""Frequency (Hz)""" )
plt.xscale("""log""" )
# Display within reasonable bounds
_lowercase : List[str] = get_bounds(__lowerCAmelCase , __lowerCAmelCase )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel("""Gain (dB)""" )
plt.plot(__lowerCAmelCase )
plt.show()
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : Optional[int] = 512
_lowercase : Union[str, Any] = [1] + [0] * (size - 1)
_lowercase : Union[str, Any] = [filter_type.process(__lowerCAmelCase ) for item in inputs]
_lowercase : List[Any] = [0] * (samplerate - size) # zero-padding
outputs += filler
_lowercase : str = np.angle(np.fft.fft(__lowerCAmelCase ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("""Frequency (Hz)""" )
plt.xscale("""log""" )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel("""Phase shift (Radians)""" )
plt.plot(np.unwrap(__lowerCAmelCase , -2 * pi ) )
plt.show()
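# A hedged usage sketch: any object exposing process(sample) satisfies the
# FilterType protocol; the concrete filter class below is an assumption about
# the surrounding audio_filters package.
#
#     from audio_filters.iir_filter import IIRFilter
#
#     filt = IIRFilter(2)  # a second-order IIR filter
#     show_frequency_response(filt, 48000)
#     show_phase_response(filt, 48000)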
"""simple docstring"""
UpperCAmelCase: List[str] = """0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
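# A minimal training-loop sketch using the exports above (model, optimizer and
# dataloader stand in for whatever objects the user already has):
#
#     accelerator = Accelerator()
#     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#     for batch in dataloader:
#         optimizer.zero_grad()
#         loss = model(**batch).loss
#         accelerator.backward(loss)  # replaces loss.backward()
#         optimizer.step()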
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
def __SCREAMING_SNAKE_CASE ( *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(lowercase__ , ["""torch"""] )
def __SCREAMING_SNAKE_CASE ( *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(lowercase__ , ["""torch"""] )
def __SCREAMING_SNAKE_CASE ( *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(lowercase__ , ["""torch"""] )
def __SCREAMING_SNAKE_CASE ( *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(lowercase__ , ["""torch"""] )
def __SCREAMING_SNAKE_CASE ( *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(lowercase__ , ["""torch"""] )
def __SCREAMING_SNAKE_CASE ( *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(lowercase__ , ["""torch"""] )
def __SCREAMING_SNAKE_CASE ( *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(lowercase__ , ["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
| 363 |
"""simple docstring"""
UpperCAmelCase: str = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
UpperCAmelCase: Any = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCAmelCase: int = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 336 | 0 |
"""simple docstring"""
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["image_processor"]
SCREAMING_SNAKE_CASE_ : Optional[int] = "SamImageProcessor"
def __init__( self ,UpperCAmelCase_ ):
super().__init__(_SCREAMING_SNAKE_CASE )
_lowercase : Tuple = self.image_processor
_lowercase : Optional[int] = -10
_lowercase : List[Any] = self.image_processor.size["longest_edge"]
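    # Encodes images with the SAM image processor and, when prompts are given, normalizes
    # point/box/label inputs to the resized image frame before returning the batch.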
def __call__( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
_lowercase : str = self.image_processor(
_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
        # pop arguments that are not used in the forward pass but are used nevertheless
_lowercase : int = encoding_image_processor["original_sizes"]
if hasattr(_SCREAMING_SNAKE_CASE ,"""numpy""" ): # Checks if Torch or TF tensor
_lowercase : Union[str, Any] = original_sizes.numpy()
_lowercase : int = self._check_and_preprocess_points(
input_points=_SCREAMING_SNAKE_CASE ,input_labels=_SCREAMING_SNAKE_CASE ,input_boxes=_SCREAMING_SNAKE_CASE ,)
_lowercase : Tuple = self._normalize_and_convert(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,input_points=_SCREAMING_SNAKE_CASE ,input_labels=_SCREAMING_SNAKE_CASE ,input_boxes=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,)
return encoding_image_processor
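    # Rescales prompts from the original image size to the model input size and converts
    # them to the requested tensor type ("pt" or "tf"), adding batch dimensions as needed.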
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_="pt" ,):
if input_points is not None:
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
_lowercase : Any = [
self._normalize_coordinates(self.target_size ,_SCREAMING_SNAKE_CASE ,original_sizes[0] ) for point in input_points
]
else:
_lowercase : Tuple = [
self._normalize_coordinates(self.target_size ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
for point, original_size in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
_lowercase : Optional[Any] = self._pad_points_and_labels(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_lowercase : Optional[int] = np.array(_SCREAMING_SNAKE_CASE )
if input_labels is not None:
_lowercase : Any = np.array(_SCREAMING_SNAKE_CASE )
if input_boxes is not None:
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
_lowercase : Optional[int] = [
self._normalize_coordinates(self.target_size ,_SCREAMING_SNAKE_CASE ,original_sizes[0] ,is_bounding_box=_SCREAMING_SNAKE_CASE )
for box in input_boxes
]
else:
_lowercase : str = [
self._normalize_coordinates(self.target_size ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,is_bounding_box=_SCREAMING_SNAKE_CASE )
for box, original_size in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
]
_lowercase : str = np.array(_SCREAMING_SNAKE_CASE )
if input_boxes is not None:
if return_tensors == "pt":
_lowercase : Any = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# boxes batch size of 1 by default
_lowercase : Union[str, Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
_lowercase : Dict = tf.convert_to_tensor(_SCREAMING_SNAKE_CASE )
# boxes batch size of 1 by default
_lowercase : int = tf.expand_dims(_SCREAMING_SNAKE_CASE ,1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"""input_boxes""": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
_lowercase : Optional[int] = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
_lowercase : List[str] = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
_lowercase : Any = tf.convert_to_tensor(_SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
_lowercase : Optional[int] = tf.expand_dims(_SCREAMING_SNAKE_CASE ,1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"""input_points""": input_points} )
if input_labels is not None:
if return_tensors == "pt":
_lowercase : Union[str, Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
_lowercase : Any = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
_lowercase : int = tf.convert_to_tensor(_SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
_lowercase : Tuple = tf.expand_dims(_SCREAMING_SNAKE_CASE ,1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"""input_labels""": input_labels} )
return encoding_image_processor
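    # Pads ragged per-image point sets (and their labels) to a common length using point_pad_value.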
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Any = max([point.shape[0] for point in input_points] )
_lowercase : Dict = []
for i, point in enumerate(_SCREAMING_SNAKE_CASE ):
if point.shape[0] != expected_nb_points:
_lowercase : Tuple = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] ,axis=0 )
_lowercase : str = np.append(input_labels[i] ,[self.point_pad_value] )
processed_input_points.append(_SCREAMING_SNAKE_CASE )
_lowercase : Tuple = processed_input_points
return input_points, input_labels
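    # Maps (x, y) coordinates from the original image size to the resized target size;
    # bounding boxes are treated as two corner points via a (-1, 2, 2) reshape.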
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_=False ):
_lowercase : Dict = original_size
_lowercase : Optional[int] = self.image_processor._get_preprocess_shape(_SCREAMING_SNAKE_CASE ,longest_edge=_SCREAMING_SNAKE_CASE )
_lowercase : int = deepcopy(_SCREAMING_SNAKE_CASE ).astype(_SCREAMING_SNAKE_CASE )
if is_bounding_box:
_lowercase : Tuple = coords.reshape(-1 ,2 ,2 )
_lowercase : str = coords[..., 0] * (new_w / old_w)
_lowercase : str = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
_lowercase : List[Any] = coords.reshape(-1 ,4 )
return coords
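    # Validates the raw prompt inputs, converting framework tensors to nested lists and
    # each entry to a numpy array; raises on malformed points, labels, or boxes.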
def lowerCamelCase__ ( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,):
if input_points is not None:
if hasattr(_SCREAMING_SNAKE_CASE ,"""numpy""" ): # Checks for TF or Torch tensor
_lowercase : Dict = input_points.numpy().tolist()
if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) or not isinstance(input_points[0] ,_SCREAMING_SNAKE_CASE ):
raise ValueError("""Input points must be a list of list of floating points.""" )
_lowercase : Optional[int] = [np.array(_SCREAMING_SNAKE_CASE ) for input_point in input_points]
else:
_lowercase : Union[str, Any] = None
if input_labels is not None:
if hasattr(_SCREAMING_SNAKE_CASE ,"""numpy""" ):
_lowercase : Any = input_labels.numpy().tolist()
if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) or not isinstance(input_labels[0] ,_SCREAMING_SNAKE_CASE ):
raise ValueError("""Input labels must be a list of list integers.""" )
_lowercase : Dict = [np.array(_SCREAMING_SNAKE_CASE ) for label in input_labels]
else:
_lowercase : Dict = None
if input_boxes is not None:
if hasattr(_SCREAMING_SNAKE_CASE ,"""numpy""" ):
_lowercase : List[Any] = input_boxes.numpy().tolist()
if (
not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
or not isinstance(input_boxes[0] ,_SCREAMING_SNAKE_CASE )
or not isinstance(input_boxes[0][0] ,_SCREAMING_SNAKE_CASE )
):
raise ValueError("""Input boxes must be a list of list of list of floating points.""" )
_lowercase : Dict = [np.array(_SCREAMING_SNAKE_CASE ).astype(np.floataa ) for box in input_boxes]
else:
_lowercase : List[Any] = None
return input_points, input_labels, input_boxes
@property
def lowerCamelCase__ ( self ):
_lowercase : Any = self.image_processor.model_input_names
return list(dict.fromkeys(_SCREAMING_SNAKE_CASE ) )
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return self.image_processor.post_process_masks(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
| 364 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
UpperCAmelCase: List[Any] = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
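    # Rounds `val` to a multiple of `multiple`, staying within [min_val, max_val] when bounds are given.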
def constraint_to_multiple_of(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=0 , __UpperCAmelCase=None ):
_lowercase : Union[str, Any] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
_lowercase : str = math.floor(val / multiple ) * multiple
if x < min_val:
_lowercase : Dict = math.ceil(val / multiple ) * multiple
return x
_lowercase : List[str] = (output_size, output_size) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else output_size
_lowercase , _lowercase : List[Any] = get_image_size(__UpperCAmelCase )
_lowercase , _lowercase : Union[str, Any] = output_size
# determine new height and width
_lowercase : str = output_height / input_height
_lowercase : List[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
_lowercase : str = scale_width
else:
# fit height
_lowercase : int = scale_height
_lowercase : List[Any] = constraint_to_multiple_of(scale_height * input_height , multiple=__UpperCAmelCase )
_lowercase : Optional[Any] = constraint_to_multiple_of(scale_width * input_width , multiple=__UpperCAmelCase )
return (new_height, new_width)
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["pixel_values"]
def __init__( self ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = PILImageResampling.BILINEAR ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = 1 / 2_55 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ )
_lowercase : List[Any] = size if size is not None else {"""height""": 3_84, """width""": 3_84}
_lowercase : str = get_size_dict(UpperCAmelCase_ )
_lowercase : Tuple = do_resize
_lowercase : Any = size
_lowercase : List[Any] = keep_aspect_ratio
_lowercase : Any = ensure_multiple_of
_lowercase : str = resample
_lowercase : Optional[Any] = do_rescale
_lowercase : List[Any] = rescale_factor
_lowercase : Union[str, Any] = do_normalize
_lowercase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowercase : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
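    # Resizes an image so height and width are multiples of `ensure_multiple_of`,
    # optionally preserving the aspect ratio by scaling as little as possible.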
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = PILImageResampling.BICUBIC ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
_lowercase : Optional[Any] = get_size_dict(UpperCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_lowercase : Dict = get_resize_output_image_size(
UpperCAmelCase_ ,output_size=(size["""height"""], size["""width"""]) ,keep_aspect_ratio=UpperCAmelCase_ ,multiple=UpperCAmelCase_ ,)
return resize(UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
return rescale(UpperCAmelCase_ ,scale=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
return normalize(UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = ChannelDimension.FIRST ,**UpperCAmelCase_ ,):
_lowercase : Any = do_resize if do_resize is not None else self.do_resize
_lowercase : List[str] = size if size is not None else self.size
_lowercase : int = get_size_dict(UpperCAmelCase_ )
_lowercase : Any = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_lowercase : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_lowercase : List[str] = resample if resample is not None else self.resample
_lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
_lowercase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase : str = do_normalize if do_normalize is not None else self.do_normalize
_lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean
_lowercase : int = image_std if image_std is not None else self.image_std
_lowercase : Union[str, Any] = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_lowercase : int = [to_numpy_array(UpperCAmelCase_ ) for image in images]
if do_resize:
_lowercase : Union[str, Any] = [self.resize(image=UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ) for image in images]
if do_rescale:
_lowercase : int = [self.rescale(image=UpperCAmelCase_ ,scale=UpperCAmelCase_ ) for image in images]
if do_normalize:
_lowercase : str = [self.normalize(image=UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ) for image in images]
_lowercase : Tuple = [to_channel_dimension_format(UpperCAmelCase_ ,UpperCAmelCase_ ) for image in images]
_lowercase : int = {"""pixel_values""": images}
return BatchFeature(data=UpperCAmelCase_ ,tensor_type=UpperCAmelCase_ )
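    # Converts raw segmentation logits into per-image semantic maps, bilinearly
    # upsampling to `target_sizes` when provided before taking the argmax.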
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ):
_lowercase : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(UpperCAmelCase_ ):
_lowercase : Tuple = target_sizes.numpy()
_lowercase : Optional[Any] = []
for idx in range(len(UpperCAmelCase_ ) ):
_lowercase : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode="""bilinear""" ,align_corners=UpperCAmelCase_ )
_lowercase : Optional[int] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCAmelCase_ )
else:
_lowercase : Union[str, Any] = logits.argmax(dim=1 )
_lowercase : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 336 | 0 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : Optional[Any] = len(_a )
while cur > 1:
# Find the maximum number in arr
_lowercase : Tuple = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
_lowercase : Optional[Any] = arr[mi::-1] + arr[mi + 1 : len(_a )]
# Reverse whole list
_lowercase : Union[str, Any] = arr[cur - 1 :: -1] + arr[cur : len(_a )]
cur -= 1
return arr
if __name__ == "__main__":
UpperCAmelCase: int = input("""Enter numbers separated by a comma:\n""").strip()
UpperCAmelCase: Optional[Any] = [int(item) for item in user_input.split(""",""")]
print(pancake_sort(unsorted))
| 365 |
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
UpperCAmelCase: Optional[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
UpperCAmelCase: Tuple = [0, 25, 50]
UpperCAmelCase: List[Any] = [25, 50, 75]
UpperCAmelCase: Optional[int] = fuzz.membership.trimf(X, abca)
UpperCAmelCase: Any = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
UpperCAmelCase: List[Any] = np.ones(75)
UpperCAmelCase: Any = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
UpperCAmelCase: str = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
UpperCAmelCase: Optional[int] = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
UpperCAmelCase: List[Any] = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
UpperCAmelCase: Optional[int] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
UpperCAmelCase: int = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
UpperCAmelCase: int = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
UpperCAmelCase: List[Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
UpperCAmelCase: int = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
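# Neither composition is computed above; a minimal sketch, assuming hypothetical
# relations R1 (X->Y) and R2 (Y->Z) given as 2-D membership matrices, would be:
#   max_min = np.max(np.minimum(R1[:, :, None], R2[None, :, :]), axis=1)
#   max_product = np.max(R1[:, :, None] * R2[None, :, :], axis=1)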
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 336 | 0 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = """Wav2Vec2FeatureExtractor"""
SCREAMING_SNAKE_CASE_ : Tuple = """AutoTokenizer"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
super().__init__(_lowercase ,_lowercase )
_lowercase : int = self.feature_extractor
_lowercase : List[Any] = False
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
try:
return super().from_pretrained(_lowercase ,**_lowercase )
except OSError:
warnings.warn(
f"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
""" include a `tokenizer_class` attribute is deprecated and will be """
"""removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"""
""" attribute to either your `config.json` or `tokenizer_config.json` """
"""file to suppress this warning: """ ,_lowercase ,)
_lowercase : str = WavaVecaFeatureExtractor.from_pretrained(_lowercase ,**_lowercase )
_lowercase : Optional[int] = WavaVecaCTCTokenizer.from_pretrained(_lowercase ,**_lowercase )
return cls(feature_extractor=_lowercase ,tokenizer=_lowercase )
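    # Routes `audio` through the feature extractor and `text` through the tokenizer,
    # attaching the tokenized text as labels when both inputs are given.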
def __call__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowercase ,**_lowercase )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
_lowercase : List[str] = kwargs.pop("""raw_speech""" )
else:
_lowercase : Dict = kwargs.pop("""audio""" ,_lowercase )
_lowercase : List[str] = kwargs.pop("""sampling_rate""" ,_lowercase )
_lowercase : Optional[int] = kwargs.pop("""text""" ,_lowercase )
if len(_lowercase ) > 0:
_lowercase : Tuple = args[0]
_lowercase : str = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
_lowercase : Optional[Any] = self.feature_extractor(_lowercase ,*_lowercase ,sampling_rate=_lowercase ,**_lowercase )
if text is not None:
_lowercase : Any = self.tokenizer(_lowercase ,**_lowercase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_lowercase : Union[str, Any] = encodings["""input_ids"""]
return inputs
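    # Pads audio `input_features` with the feature extractor and `labels` with the
    # tokenizer, mirroring the routing logic of __call__.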
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*_lowercase ,**_lowercase )
_lowercase : Tuple = kwargs.pop("""input_features""" ,_lowercase )
_lowercase : Tuple = kwargs.pop("""labels""" ,_lowercase )
if len(_lowercase ) > 0:
_lowercase : int = args[0]
_lowercase : Dict = args[1:]
if input_features is not None:
_lowercase : str = self.feature_extractor.pad(_lowercase ,*_lowercase ,**_lowercase )
if labels is not None:
_lowercase : List[Any] = self.tokenizer.pad(_lowercase ,**_lowercase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
_lowercase : Union[str, Any] = labels["""input_ids"""]
return input_features
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return self.tokenizer.batch_decode(*_lowercase ,**_lowercase )
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return self.tokenizer.decode(*_lowercase ,**_lowercase )
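    # Deprecated context manager that temporarily makes the tokenizer the current
    # processor for target-side (label) processing, then restores the feature extractor.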
@contextmanager
def lowerCamelCase__ ( self ):
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
_lowercase : Optional[int] = True
_lowercase : str = self.tokenizer
yield
_lowercase : Any = self.feature_extractor
_lowercase : Optional[int] = False
| 366 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ):
_lowercase : str = tempfile.mkdtemp()
# fmt: off
_lowercase : List[Any] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_lowercase : Optional[int] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) )
_lowercase : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
_lowercase : Optional[int] = {"""unk_token""": """<unk>"""}
_lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
_lowercase : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
_lowercase : Dict = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
}
_lowercase : List[Any] = os.path.join(self.tmpdirname ,UpperCAmelCase_ )
with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp:
json.dump(UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]
_lowercase : Tuple = [Image.fromarray(np.moveaxis(UpperCAmelCase_ ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
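    # Round-trips slow and fast processors through save_pretrained/from_pretrained and
    # checks that vocabularies and image-processor configs survive serialization.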
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.get_tokenizer()
_lowercase : List[Any] = self.get_rust_tokenizer()
_lowercase : List[Any] = self.get_image_processor()
_lowercase : List[str] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
_lowercase : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=UpperCAmelCase_ )
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
_lowercase : List[str] = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,UpperCAmelCase_ )
self.assertIsInstance(processor_fast.tokenizer ,UpperCAmelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,UpperCAmelCase_ )
self.assertIsInstance(processor_fast.image_processor ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : str = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowercase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
_lowercase : Optional[int] = self.get_image_processor(do_normalize=UpperCAmelCase_ ,padding_value=1.0 )
_lowercase : int = CLIPProcessor.from_pretrained(
self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=UpperCAmelCase_ ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,UpperCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : Optional[int] = self.get_tokenizer()
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : int = self.prepare_image_inputs()
_lowercase : str = image_processor(UpperCAmelCase_ ,return_tensors="""np""" )
_lowercase : int = processor(images=UpperCAmelCase_ ,return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : Optional[Any] = self.get_tokenizer()
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : List[Any] = """lower newer"""
_lowercase : Any = processor(text=UpperCAmelCase_ )
_lowercase : Union[str, Any] = tokenizer(UpperCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.get_image_processor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : str = """lower newer"""
_lowercase : List[Any] = self.prepare_image_inputs()
_lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def lowerCamelCase__ ( self ):
_lowercase : Dict = self.get_image_processor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowercase : int = processor.batch_decode(UpperCAmelCase_ )
_lowercase : Tuple = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : Optional[Any] = """lower newer"""
_lowercase : Any = self.prepare_image_inputs()
_lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
| 336 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase: str = logging.get_logger(__name__)
UpperCAmelCase: Optional[int] = {
"""nielsr/canine-s""": 2_048,
}
# Unicode defines 1,114,112 total “codepoints”
UpperCAmelCase: Tuple = 1_114_112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
UpperCAmelCase: List[Any] = 0
UpperCAmelCase: List[Any] = 0xe0_00
UpperCAmelCase: str = 0xe0_01
UpperCAmelCase: Union[str, Any] = 0xe0_02
UpperCAmelCase: Optional[int] = 0xe0_03
UpperCAmelCase: Union[str, Any] = 0xe0_04
# Maps special codepoints to human-readable names.
UpperCAmelCase: List[str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: """[CLS]""",
SEP: """[SEP]""",
BOS: """[BOS]""",
MASK: """[MASK]""",
PAD: """[PAD]""",
RESERVED: """[RESERVED]""",
}
# Maps special codepoint human-readable names to their codepoint values.
UpperCAmelCase: Dict = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class UpperCamelCase ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self ,UpperCAmelCase_=chr(CLS ) ,UpperCAmelCase_=chr(SEP ) ,UpperCAmelCase_=chr(SEP ) ,UpperCAmelCase_=chr(CLS ) ,UpperCAmelCase_=chr(PAD ) ,UpperCAmelCase_=chr(MASK ) ,UpperCAmelCase_=False ,UpperCAmelCase_=20_48 ,**UpperCAmelCase_ ,):
_lowercase : List[Any] = AddedToken(_UpperCAmelCase ,lstrip=_UpperCAmelCase ,rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase ,_UpperCAmelCase ) else bos_token
_lowercase : Optional[Any] = AddedToken(_UpperCAmelCase ,lstrip=_UpperCAmelCase ,rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase ,_UpperCAmelCase ) else eos_token
_lowercase : int = AddedToken(_UpperCAmelCase ,lstrip=_UpperCAmelCase ,rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase ,_UpperCAmelCase ) else sep_token
_lowercase : Optional[Any] = AddedToken(_UpperCAmelCase ,lstrip=_UpperCAmelCase ,rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase ,_UpperCAmelCase ) else cls_token
_lowercase : List[Any] = AddedToken(_UpperCAmelCase ,lstrip=_UpperCAmelCase ,rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase ,_UpperCAmelCase ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
_lowercase : str = AddedToken(_UpperCAmelCase ,lstrip=_UpperCAmelCase ,rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase ,_UpperCAmelCase ) else mask_token
super().__init__(
bos_token=_UpperCAmelCase ,eos_token=_UpperCAmelCase ,sep_token=_UpperCAmelCase ,cls_token=_UpperCAmelCase ,pad_token=_UpperCAmelCase ,mask_token=_UpperCAmelCase ,add_prefix_space=_UpperCAmelCase ,model_max_length=_UpperCAmelCase ,**_UpperCAmelCase ,)
# Creates a mapping for looking up the IDs of special symbols.
_lowercase : Any = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
_lowercase : Any = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
_lowercase : Optional[int] = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
_lowercase : str = UNICODE_VOCAB_SIZE
_lowercase : Optional[Any] = len(self._special_codepoints )
@property
def lowerCamelCase__ ( self ):
return self._unicode_vocab_size
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return list(_UpperCAmelCase )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
try:
return ord(_UpperCAmelCase )
except TypeError:
raise ValueError(f"""invalid token: \'{token}\'""" )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(_UpperCAmelCase )
except TypeError:
raise ValueError(f"""invalid id: {index}""" )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return "".join(_UpperCAmelCase )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ):
_lowercase : str = [self.sep_token_id]
_lowercase : Union[str, Any] = [self.cls_token_id]
_lowercase : Dict = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase ,token_ids_a=_UpperCAmelCase ,already_has_special_tokens=_UpperCAmelCase )
_lowercase : Optional[Any] = [1] + ([0] * len(_UpperCAmelCase )) + [1]
if token_ids_a is not None:
result += ([0] * len(_UpperCAmelCase )) + [1]
return result
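    # Token type IDs: 0 over `[CLS] A [SEP]`, 1 over `B [SEP]` when a second sequence is given.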
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ):
_lowercase : Union[str, Any] = [self.sep_token_id]
_lowercase : int = [self.cls_token_id]
_lowercase : Union[str, Any] = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ):
return ()
| 367 |
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
UpperCAmelCase: str = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class UpperCamelCase ( datasets.BuilderConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[datasets.Features] = None
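# Builds a generator over the DataFrame that visits partitions in `partition_order`,
# yielding (f"{partition_id}_{row_id}", row-as-dict) example pairs.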
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , ):
import pyspark
def generate_fn():
_lowercase : List[Any] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
_lowercase : Optional[int] = df_with_partition_id.select("""*""" ).where(F"""part_id = {partition_id}""" ).drop("""part_id""" )
_lowercase : int = partition_df.collect()
_lowercase : Dict = 0
for row in rows:
yield F"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class UpperCamelCase ( _BaseExamplesIterable ):
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ,):
_lowercase : Union[str, Any] = df
_lowercase : List[str] = partition_order or range(self.df.rdd.getNumPartitions() )
_lowercase : Tuple = _generate_iterable_examples(self.df ,self.partition_order )
def __iter__( self ):
yield from self.generate_examples_fn()
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : List[str] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(UpperCAmelCase_ )
return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Union[str, Any] = self.split_shard_indices_by_worker(UpperCAmelCase_ ,UpperCAmelCase_ )
return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ )
@property
def lowerCamelCase__ ( self ):
return len(self.partition_order )
class UpperCamelCase ( datasets.DatasetBuilder ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = SparkConfig
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
import pyspark
_lowercase : List[Any] = pyspark.sql.SparkSession.builder.getOrCreate()
_lowercase : List[Any] = df
_lowercase : int = working_dir
super().__init__(
cache_dir=UpperCAmelCase_ ,config_name=str(self.df.semanticHash() ) ,**UpperCAmelCase_ ,)
def lowerCamelCase__ ( self ):
# Returns the path of the created file.
def create_cache_and_write_probe(UpperCAmelCase_ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir ,exist_ok=UpperCAmelCase_ )
_lowercase : Union[str, Any] = os.path.join(self._cache_dir ,"""fs_test""" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(UpperCAmelCase_ ,"""a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" ,"""""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
_lowercase : List[str] = (
self._spark.sparkContext.parallelize(range(1 ) ,1 ).mapPartitions(UpperCAmelCase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def lowerCamelCase__ ( self ):
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
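    # Estimates bytes-per-row from a max-100-row sample (in Arrow format) and
    # repartitions the DataFrame so each shard stays under `max_shard_size`.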
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
import pyspark
def get_arrow_batch_size(UpperCAmelCase_ ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
_lowercase : List[str] = self.df.count()
_lowercase : List[str] = df_num_rows if df_num_rows <= 1_00 else 1_00
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
_lowercase : Union[str, Any] = (
self.df.limit(UpperCAmelCase_ )
.repartition(1 )
.mapInArrow(UpperCAmelCase_ ,"""batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
_lowercase : List[Any] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
_lowercase : int = min(UpperCAmelCase_ ,int(approx_total_size / max_shard_size ) )
_lowercase : List[Any] = self.df.repartition(UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
import pyspark
_lowercase : Union[str, Any] = ParquetWriter if file_format == """parquet""" else ArrowWriter
_lowercase : List[Any] = os.path.join(self._working_dir ,os.path.basename(UpperCAmelCase_ ) ) if self._working_dir else fpath
_lowercase : Any = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which would result in a pickling error due to
# pickling the SparkContext.
_lowercase : Union[str, Any] = self.config.features
_lowercase : Optional[int] = self._writer_batch_size
_lowercase : Optional[Any] = self._fs.storage_options
def write_arrow(UpperCAmelCase_ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
_lowercase : Any = pyspark.TaskContext().taskAttemptId()
_lowercase : List[str] = next(UpperCAmelCase_ ,UpperCAmelCase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
_lowercase : List[Any] = 0
_lowercase : int = writer_class(
features=UpperCAmelCase_ ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,)
_lowercase : Optional[int] = pa.Table.from_batches([first_batch] )
writer.write_table(UpperCAmelCase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
_lowercase , _lowercase : Optional[Any] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
shard_id += 1
_lowercase : Union[str, Any] = writer_class(
features=writer._features ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,)
_lowercase : Dict = pa.Table.from_batches([batch] )
writer.write_table(UpperCAmelCase_ )
if writer._num_bytes > 0:
_lowercase , _lowercase : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(UpperCAmelCase_ ) ):
_lowercase : Dict = os.path.join(os.path.dirname(UpperCAmelCase_ ) ,os.path.basename(UpperCAmelCase_ ) )
shutil.move(UpperCAmelCase_ ,UpperCAmelCase_ )
_lowercase : List[str] = (
self.df.mapInArrow(UpperCAmelCase_ ,"""task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) ,pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) ,pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) ,pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) ,)
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = "arrow" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
self._validate_cache_dir()
_lowercase : Tuple = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(UpperCAmelCase_ )
_lowercase : Optional[int] = not is_remote_filesystem(self._fs )
_lowercase : Dict = os.path.join if is_local else posixpath.join
_lowercase : int = """-TTTTT-SSSSS-of-NNNNN"""
_lowercase : Optional[Any] = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
_lowercase : Dict = path_join(self._output_dir ,UpperCAmelCase_ )
_lowercase : List[Any] = 0
_lowercase : Optional[Any] = 0
_lowercase : int = 0
_lowercase : Any = []
_lowercase : Any = []
for task_id, content in self._prepare_split_single(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase , _lowercase , _lowercase , _lowercase : Tuple = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(UpperCAmelCase_ )
_lowercase : Optional[int] = total_num_examples
_lowercase : List[Any] = total_num_bytes
# should rename everything at the end
logger.debug(f"""Renaming {total_shards} shards.""" )
if total_shards > 1:
_lowercase : List[Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which would result in a
# pickling error due to pickling the SparkContext.
_lowercase : Union[str, Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
rename(
UpperCAmelCase_ ,fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace("""TTTTT-SSSSS""" ,f"""{global_shard_id:05d}""" ).replace("""NNNNN""" ,f"""{total_shards:05d}""" ) ,)
_lowercase : Optional[Any] = []
_lowercase : List[str] = 0
for i in range(len(UpperCAmelCase_ ) ):
_lowercase , _lowercase : List[str] = task_id_and_num_shards[i]
for shard_id in range(UpperCAmelCase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(UpperCAmelCase_ ,len(UpperCAmelCase_ ) ).map(lambda UpperCAmelCase_ : _rename_shard(*UpperCAmelCase_ ) ).collect()
else:
# don't use any pattern
_lowercase : Tuple = 0
_lowercase : Optional[Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace(UpperCAmelCase_ ,"""""" ) ,)
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,):
return SparkExamplesIterable(self.df )
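# Hedged, self-contained sketch (all names illustrative) of the shard-renaming
# scheme used above: shards are first written as -TTTTT-SSSSS-of-NNNNN
# (task id / per-task shard id) and then renamed to a flat global shard index.
def _demo_shard_names(task_id_and_num_shards, template="data-TTTTT-SSSSS-of-NNNNN.arrow"):
    total_shards = sum(num for _, num in task_id_and_num_shards)
    renames = []
    global_shard_id = 0
    for task_id, num_shards in task_id_and_num_shards:
        for shard_id in range(num_shards):
            src = template.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}")
            dst = template.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}")
            renames.append((src, dst))
            global_shard_id += 1
    return renames

# e.g. _demo_shard_names([(0, 2), (3, 1)])[0] ==
# ("data-00000-00000-of-NNNNN.arrow", "data-00000-of-00003.arrow")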
| 336 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase: str = logging.get_logger(__name__)
UpperCAmelCase: int = '''▁'''
UpperCAmelCase: str = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
UpperCAmelCase: List[str] = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
UpperCAmelCase: int = {'''vinai/bartpho-syllable''': 1_024}
class UpperCamelCase ( _a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : str = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : int = ["""input_ids""", """attention_mask"""]
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_="<s>" ,UpperCAmelCase_="</s>" ,UpperCAmelCase_="</s>" ,UpperCAmelCase_="<s>" ,UpperCAmelCase_="<unk>" ,UpperCAmelCase_="<pad>" ,UpperCAmelCase_="<mask>" ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
# The mask token behaves like a normal word, i.e. it includes the space before it
_lowercase : Optional[int] = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else mask_token
_lowercase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCamelCase ,eos_token=__lowerCamelCase ,unk_token=__lowerCamelCase ,sep_token=__lowerCamelCase ,cls_token=__lowerCamelCase ,pad_token=__lowerCamelCase ,mask_token=__lowerCamelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__lowerCamelCase ,)
_lowercase : int = vocab_file
_lowercase : Optional[int] = monolingual_vocab_file
_lowercase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCamelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_lowercase : Optional[Any] = {}
_lowercase : Optional[Any] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__lowerCamelCase ) not in self.fairseq_tokens_to_ids:
_lowercase : Tuple = cnt
cnt += 1
with open(__lowerCamelCase ,"""r""" ,encoding="""utf-8""" ) as f:
for line in f.readlines():
_lowercase : int = line.strip().split()[0]
_lowercase : Any = len(self.fairseq_tokens_to_ids )
if str(__lowerCamelCase ) not in self.fairseq_tokens_to_ids:
_lowercase : str = len(self.fairseq_tokens_to_ids )
_lowercase : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
_lowercase : int = self.__dict__.copy()
_lowercase : Optional[int] = None
_lowercase : List[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self ,UpperCAmelCase_ ):
_lowercase : List[str] = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
_lowercase : Dict = {}
_lowercase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowercase : Union[str, Any] = [self.cls_token_id]
_lowercase : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase ,token_ids_a=__lowerCamelCase ,already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ):
_lowercase : Any = [self.sep_token_id]
_lowercase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase__ ( self ):
return len(self.fairseq_ids_to_tokens )
def lowerCamelCase__ ( self ):
_lowercase : Tuple = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return self.sp_model.encode(__lowerCamelCase ,out_type=__lowerCamelCase )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return self.fairseq_ids_to_tokens[index]
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : int = """""".join(__lowerCamelCase ).replace(__lowerCamelCase ,""" """ ).strip()
return out_string
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ):
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowercase : Tuple = os.path.join(
__lowerCamelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_lowercase : List[Any] = os.path.join(
__lowerCamelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] ,)
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,__lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase ,"""wb""" ) as fi:
_lowercase : List[str] = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file ,__lowerCamelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__lowerCamelCase ,"""w""" ,encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f"""{str(__lowerCamelCase )} \n""" )
return out_vocab_file, out_monolingual_vocab_file
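# Hedged usage sketch: the special-token layout built by the methods above
# follows the RoBERTa/BART convention -- <s> A </s> for one sequence and
# <s> A </s></s> B </s> for a pair. The token ids below are placeholders, not
# real BARTpho vocabulary ids.
_cls_id, _sep_id = 0, 2
_seq_a, _seq_b = [10, 11], [20]
assert [_cls_id] + _seq_a + [_sep_id] == [0, 10, 11, 2]  # <s> A </s>
assert [_cls_id] + _seq_a + [_sep_id, _sep_id] + _seq_b + [_sep_id] == [0, 10, 11, 2, 2, 20, 2]  # <s> A </s></s> B </s>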
| 368 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase: Optional[int] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( snake_case , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = XLNetTokenizer
SCREAMING_SNAKE_CASE_ : Dict = XLNetTokenizerFast
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
def lowerCamelCase__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
_lowercase : Dict = XLNetTokenizer(UpperCAmelCase_ ,keep_accents=UpperCAmelCase_ )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = """<s>"""
_lowercase : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) ,UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<unk>""" )
self.assertEqual(vocab_keys[1] ,"""<s>""" )
self.assertEqual(vocab_keys[-1] ,"""<eod>""" )
self.assertEqual(len(UpperCAmelCase_ ) ,10_06 )
def lowerCamelCase__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size ,10_00 )
def lowerCamelCase__ ( self ):
_lowercase : int = XLNetTokenizer(UpperCAmelCase_ ,keep_accents=UpperCAmelCase_ )
_lowercase : Union[str, Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCAmelCase_ ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) ,[2_85, 46, 10, 1_70, 3_82] )
_lowercase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
_lowercase : List[Any] = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ ,[8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
_lowercase : Optional[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
def lowerCamelCase__ ( self ):
_lowercase : Dict = XLNetTokenizer(UpperCAmelCase_ ,do_lower_case=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""▁he""", """ll""", """o"""] )
def lowerCamelCase__ ( self ):
_lowercase : int = XLNetTokenizer(UpperCAmelCase_ ,do_lower_case=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
@slow
def lowerCamelCase__ ( self ):
_lowercase : Dict = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
_lowercase : int = tokenizer.encode("""sequence builders""" ,add_special_tokens=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=UpperCAmelCase_ )
_lowercase : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
_lowercase : Dict = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ,UpperCAmelCase_ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def lowerCamelCase__ ( self ):
# fmt: off
_lowercase : Union[str, Any] = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ ,model_name="""xlnet-base-cased""" ,revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" ,)
| 336 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , ):
_lowercase : int = {}
if train_file is not None:
_lowercase : Dict = [train_file]
if eval_file is not None:
_lowercase : Any = [eval_file]
if test_file is not None:
_lowercase : List[str] = [test_file]
_lowercase : List[str] = datasets.load_dataset("""csv""" , data_files=a__ )
_lowercase : Tuple = list(ds[list(files.keys() )[0]].features.keys() )
_lowercase : int = features_name.pop(a__ )
_lowercase : int = list(set(ds[list(files.keys() )[0]][label_name] ) )
_lowercase : Dict = {label: i for i, label in enumerate(a__ )}
_lowercase : List[str] = tokenizer.model_input_names
_lowercase : Dict = {}
if len(a__ ) == 1:
for k in files.keys():
_lowercase : Any = ds[k].map(
lambda __UpperCAmelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=a__ , max_length=a__ , padding="""max_length""" ) , batched=a__ , )
elif len(a__ ) == 2:
for k in files.keys():
_lowercase : Tuple = ds[k].map(
lambda __UpperCAmelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=a__ , max_length=a__ , padding="""max_length""" , ) , batched=a__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_lowercase : str = {k: v for k, v in ex.items() if k in input_names}
_lowercase : int = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_lowercase : Any = {k: v for k, v in ex.items() if k in input_names}
_lowercase : Dict = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_lowercase : List[Any] = {k: v for k, v in ex.items() if k in input_names}
_lowercase : int = labelaid[ex[label_name]]
yield (d, label)
_lowercase : Tuple = (
tf.data.Dataset.from_generator(
a__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_lowercase : int = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
_lowercase : Optional[int] = (
tf.data.Dataset.from_generator(
a__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_lowercase : Optional[int] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
_lowercase : Dict = (
tf.data.Dataset.from_generator(
a__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_lowercase : Optional[Any] = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
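# Minimal, hedged sketch of the generator-to-tf.data pattern used in the
# function above, with toy ids instead of real tokenizer output. The feature
# name "input_ids" is illustrative only.
def _demo_generator_dataset():
    def gen():
        for input_ids, label in (([1, 2, 3], 0), ([4, 5, 6], 1)):
            yield {"input_ids": input_ids}, label

    return tf.data.Dataset.from_generator(
        gen,
        ({"input_ids": tf.int32}, tf.int32),
        ({"input_ids": tf.TensorShape([None])}, tf.TensorShape([])),
    )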
UpperCAmelCase: Optional[int] = logging.getLogger(__name__)
@dataclass
class UpperCamelCase :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = field(metadata={"help": "Which column contains the label"} )
SCREAMING_SNAKE_CASE_ : int = field(default=snake_case , metadata={"help": "The path of the training file"} )
SCREAMING_SNAKE_CASE_ : List[str] = field(default=snake_case , metadata={"help": "The path of the development file"} )
SCREAMING_SNAKE_CASE_ : Tuple = field(default=snake_case , metadata={"help": "The path of the test file"} )
SCREAMING_SNAKE_CASE_ : Any = field(
default=1_2_8 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
SCREAMING_SNAKE_CASE_ : str = field(
default=snake_case , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class UpperCamelCase :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
SCREAMING_SNAKE_CASE_ : Any = field(
default=snake_case , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE_ : List[str] = field(
default=snake_case , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE_ : Tuple = field(default=snake_case , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
SCREAMING_SNAKE_CASE_ : Any = field(
default=snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def __SCREAMING_SNAKE_CASE ( ):
_lowercase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
_lowercase , _lowercase , _lowercase : Dict = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(
F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
F"""16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowercase : List[str] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowercase , _lowercase , _lowercase , _lowercase : List[str] = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=a__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
_lowercase : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(a__ ) , labelaid=a__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
_lowercase : List[str] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path ) , config=a__ , cache_dir=model_args.cache_dir , )
def compute_metrics(__UpperCAmelCase ) -> Dict:
_lowercase : int = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_lowercase : List[str] = TFTrainer(
model=a__ , args=a__ , train_dataset=a__ , eval_dataset=a__ , compute_metrics=a__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_lowercase : List[str] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_lowercase : Any = trainer.evaluate()
_lowercase : List[Any] = os.path.join(training_args.output_dir , """eval_results.txt""" )
with open(a__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
results.update(a__ )
return results
if __name__ == "__main__":
main()
| 369 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
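    # Doctest example so the testmod() hook below has something to check.
    """Reverse the word order of a whitespace-separated string.

    >>> __SCREAMING_SNAKE_CASE("I love Python")
    'Python love I'
    """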
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 | 0 |
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
UpperCAmelCase: Dict = 300 # TEMPERATURE (unit = K)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
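# Hedged worked example (typical silicon numbers, illustrative only): with
# Nd = Na = 1e17 cm^-3 and ni = 1.5e10 cm^-3 at T = 300 K, kT/q is about
# 0.0259 V and ln(Nd * Na / ni**2) is about 31.4, so the built-in potential is
# roughly 0.81 V. T = 300 is written inline here for self-containment.
def _demo_builtin_voltage():
    kT_over_q = Boltzmann * 300 / physical_constants["electron volt"][0]
    return kT_over_q * log((1e17 * 1e17) / (1.5e10) ** 2)  # ~0.81 V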
if __name__ == "__main__":
import doctest
doctest.testmod()
| 370 |
"""simple docstring"""
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : int = []
for line in lines:
_lowercase : Dict = re.sub(R"""#.*""" , """""" , __UpperCAmelCase ) # remove comments
if line:
filtered_lines.append(__UpperCAmelCase )
_lowercase : Tuple = """\n""".join(__UpperCAmelCase )
# Make a hash from all this code
_lowercase : Tuple = full_str.encode("""utf-8""" )
return shaaaa(__UpperCAmelCase ).hexdigest()
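# Self-contained sketch of the hashing idea above: strip comments and blank
# lines, then hash the remaining source, so purely cosmetic edits do not
# change the cache key. sha256 is assumed to be the hash intended by the
# mangled hashlib import at the top of this file.
from hashlib import sha256 as _demo_sha256

def _demo_hash_python_lines(lines):
    kept = [re.sub(r"#.*", "", line) for line in lines]  # drop comments
    return _demo_sha256("\n".join(line for line in kept if line).encode("utf-8")).hexdigest()

assert _demo_hash_python_lines(["x = 1# note", "", "y = 2"]) == _demo_hash_python_lines(["x = 1", "y = 2"])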
# get importable module names and hash for caching
UpperCAmelCase: Tuple = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data file extensions
UpperCAmelCase: List[str] = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
UpperCAmelCase: Any = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
UpperCAmelCase: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 336 | 0 |
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
UpperCAmelCase: List[Any] = importlib.util.find_spec("""s3fs""") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
UpperCAmelCase: List[Any] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if "://" in dataset_path:
_lowercase : Any = dataset_path.split("""://""" )[1]
return dataset_path
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if fs is not None and fs.protocol != "file":
return True
else:
return False
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : Union[str, Any] = not is_remote_filesystem(__UpperCAmelCase )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(__UpperCAmelCase ) , fs._strip_protocol(__UpperCAmelCase ) )
else:
fs.mv(__UpperCAmelCase , __UpperCAmelCase , recursive=__UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( ):
if hasattr(fsspec.asyn , """reset_lock""" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
_lowercase : List[str] = None
_lowercase : Union[str, Any] = None
_lowercase : Dict = threading.Lock()
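# Hedged usage sketch for the helpers above, exercised against fsspec's local
# filesystem; the names below are standalone stand-ins, not the definitions
# themselves.
def _demo_fs_helpers():
    fs = fsspec.filesystem("file")
    is_remote = fs is not None and fs.protocol != "file" and "file" not in fs.protocol  # False for the local fs
    stripped = "s3://bucket/dataset".split("://")[1]  # -> "bucket/dataset", as in the URI helper
    return is_remote, stripped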
| 371 |
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 336 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class UpperCamelCase :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : torch.Tensor # [batch_size x 3]
SCREAMING_SNAKE_CASE_ : torch.Tensor # [batch_size x 3]
SCREAMING_SNAKE_CASE_ : torch.Tensor # [batch_size x 3]
SCREAMING_SNAKE_CASE_ : torch.Tensor # [batch_size x 3]
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : float
SCREAMING_SNAKE_CASE_ : float
SCREAMING_SNAKE_CASE_ : Tuple[int]
def lowerCamelCase__ ( self ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def lowerCamelCase__ ( self ):
return torch.from_numpy(np.array([self.width, self.height] ,dtype=np.floataa ) )
def lowerCamelCase__ ( self ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] ,dtype=np.floataa ) )
def lowerCamelCase__ ( self ):
_lowercase : Optional[int] = torch.arange(self.height * self.width )
_lowercase : Union[str, Any] = torch.stack(
[
pixel_indices % self.width,
torch.div(UpperCAmelCase_ ,self.width ,rounding_mode="""trunc""" ),
] ,axis=1 ,)
return coords
@property
def lowerCamelCase__ ( self ):
_lowercase , *_lowercase : Dict = self.shape
_lowercase : Any = int(np.prod(UpperCAmelCase_ ) )
_lowercase : List[Any] = self.get_image_coords()
_lowercase : int = torch.broadcast_to(coords.unsqueeze(0 ) ,[batch_size * inner_batch_size, *coords.shape] )
_lowercase : Any = self.get_camera_rays(UpperCAmelCase_ )
_lowercase : str = rays.view(UpperCAmelCase_ ,inner_batch_size * self.height * self.width ,2 ,3 )
return rays
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase , *_lowercase , _lowercase : Tuple = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_lowercase : Any = coords.view(UpperCAmelCase_ ,-1 ,2 )
_lowercase : Any = self.resolution()
_lowercase : int = self.fov()
_lowercase : Optional[int] = (flat.float() / (res - 1)) * 2 - 1
_lowercase : List[Any] = fracs * torch.tan(fov / 2 )
_lowercase : Any = fracs.view(UpperCAmelCase_ ,-1 ,2 )
_lowercase : Optional[int] = (
self.z.view(UpperCAmelCase_ ,1 ,3 )
+ self.x.view(UpperCAmelCase_ ,1 ,3 ) * fracs[:, :, :1]
+ self.y.view(UpperCAmelCase_ ,1 ,3 ) * fracs[:, :, 1:]
)
_lowercase : List[str] = directions / directions.norm(dim=-1 ,keepdim=UpperCAmelCase_ )
_lowercase : List[str] = torch.stack(
[
torch.broadcast_to(self.origin.view(UpperCAmelCase_ ,1 ,3 ) ,[batch_size, directions.shape[1], 3] ),
directions,
] ,dim=2 ,)
return rays.view(UpperCAmelCase_ ,*UpperCAmelCase_ ,2 ,3 )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin ,x=self.x ,y=self.y ,z=self.z ,width=UpperCAmelCase_ ,height=UpperCAmelCase_ ,x_fov=self.x_fov ,y_fov=self.y_fov ,)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : str = []
_lowercase : str = []
_lowercase : Union[str, Any] = []
_lowercase : Optional[int] = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
_lowercase : int = np.array([np.sin(lowerCamelCase__ ), np.cos(lowerCamelCase__ ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
_lowercase : Any = -z * 4
_lowercase : Any = np.array([np.cos(lowerCamelCase__ ), -np.sin(lowerCamelCase__ ), 0.0] )
_lowercase : List[str] = np.cross(lowerCamelCase__ , lowerCamelCase__ )
origins.append(lowerCamelCase__ )
xs.append(lowerCamelCase__ )
ys.append(lowerCamelCase__ )
zs.append(lowerCamelCase__ )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(lowerCamelCase__ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowerCamelCase__ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowerCamelCase__ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowerCamelCase__ , axis=0 ) ).float() , width=lowerCamelCase__ , height=lowerCamelCase__ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowerCamelCase__ )) , )
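# Self-contained sketch of the pixel grid built by get_image_coords above:
# indices 0..h*w-1 become (x, y) pairs with x = i % width and y = i // width.
# The sizes are illustrative.
def _demo_image_coords(height=2, width=3):
    # rows come out as (0,0) (1,0) (2,0) (0,1) (1,1) (2,1)
    idx = torch.arange(height * width)
    return torch.stack([idx % width, torch.div(idx, width, rounding_mode="trunc")], dim=1)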
| 350 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( ):
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
UpperCAmelCase: Any = generate_large_matrix()
UpperCAmelCase: Dict = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
assert all(row == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for row in grid )
assert all(list(__UpperCAmelCase ) == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for col in zip(*__UpperCAmelCase ) )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
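    # Binary search for the index of the first negative number in a row that is
    # sorted in decreasing order, e.g. [4, 3, 2, -1] -> 3 and [-1, -2] -> 0.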
_lowercase : Tuple = 0
_lowercase : List[Any] = len(__UpperCAmelCase ) - 1
# Edge cases: the array is empty, or its first value is already negative (first negative index is 0).
if not array or array[0] < 0:
return 0
while right + 1 > left:
_lowercase : Tuple = (left + right) // 2
_lowercase : List[Any] = array[mid]
# num is the first negative number iff num < 0 and the element just before it is non-negative.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
_lowercase : Dict = mid + 1
else:
_lowercase : Dict = mid - 1
# No negative numbers, so return the length of the array (i.e. the last index + 1).
return len(__UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : Any = 0
_lowercase : Optional[int] = len(grid[0] )
for i in range(len(__UpperCAmelCase ) ):
_lowercase : Union[str, Any] = find_negative_index(grid[i][:bound] )
total += bound
return (len(__UpperCAmelCase ) * len(grid[0] )) - total
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
return len([number for row in grid for number in row if number < 0] )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : Tuple = 0
for row in grid:
for i, number in enumerate(__UpperCAmelCase ):
if number < 0:
total += len(__UpperCAmelCase ) - i
break
return total
def __SCREAMING_SNAKE_CASE ( ):
from timeit import timeit
print("""Running benchmarks""" )
_lowercase : Tuple = (
"""from __main__ import count_negatives_binary_search, """
"""count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
_lowercase : Dict = timeit(F"""{func}(grid=grid)""" , setup=__UpperCAmelCase , number=500 )
print(F"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 336 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase: Dict = logging.get_logger(__name__)
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["pixel_values"]
def __init__( self ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = PILImageResampling.BILINEAR ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = True ,UpperCAmelCase_ = 1 / 2_55 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
super().__init__(**_snake_case )
_lowercase : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
_lowercase : Optional[Any] = get_size_dict(_snake_case ,default_to_square=_snake_case )
_lowercase : Optional[Any] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
_lowercase : Dict = get_size_dict(_snake_case ,param_name="""crop_size""" )
_lowercase : str = do_resize
_lowercase : Tuple = size
_lowercase : Optional[Any] = resample
_lowercase : Tuple = do_center_crop
_lowercase : List[str] = crop_size
_lowercase : List[str] = do_rescale
_lowercase : List[str] = rescale_factor
_lowercase : Tuple = do_normalize
_lowercase : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowercase : List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = PILImageResampling.BICUBIC ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
_lowercase : int = get_size_dict(_snake_case ,default_to_square=_snake_case )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
_lowercase : Optional[int] = get_resize_output_image_size(_snake_case ,size=size["""shortest_edge"""] ,default_to_square=_snake_case )
return resize(_snake_case ,size=_snake_case ,resample=_snake_case ,data_format=_snake_case ,**_snake_case )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
_lowercase : Optional[Any] = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(_snake_case ,size=(size["""height"""], size["""width"""]) ,data_format=_snake_case ,**_snake_case )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ):
return rescale(_snake_case ,scale=_snake_case ,data_format=_snake_case ,**_snake_case )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
return normalize(_snake_case ,mean=_snake_case ,std=_snake_case ,data_format=_snake_case ,**_snake_case )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = ChannelDimension.FIRST ,**UpperCAmelCase_ ,):
_lowercase : Tuple = do_resize if do_resize is not None else self.do_resize
_lowercase : str = size if size is not None else self.size
_lowercase : List[Any] = get_size_dict(_snake_case ,default_to_square=_snake_case )
_lowercase : List[str] = resample if resample is not None else self.resample
_lowercase : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowercase : List[Any] = crop_size if crop_size is not None else self.crop_size
_lowercase : Optional[int] = get_size_dict(_snake_case ,param_name="""crop_size""" )
_lowercase : Tuple = do_rescale if do_rescale is not None else self.do_rescale
_lowercase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
_lowercase : Dict = image_mean if image_mean is not None else self.image_mean
_lowercase : List[Any] = image_std if image_std is not None else self.image_std
_lowercase : Optional[Any] = make_list_of_images(_snake_case )
if not valid_images(_snake_case ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_lowercase : Any = [to_numpy_array(_snake_case ) for image in images]
if do_resize:
_lowercase : List[Any] = [self.resize(image=_snake_case ,size=_snake_case ,resample=_snake_case ) for image in images]
if do_center_crop:
_lowercase : List[Any] = [self.center_crop(image=_snake_case ,size=_snake_case ) for image in images]
if do_rescale:
_lowercase : str = [self.rescale(image=_snake_case ,scale=_snake_case ) for image in images]
if do_normalize:
_lowercase : Optional[Any] = [self.normalize(image=_snake_case ,mean=_snake_case ,std=_snake_case ) for image in images]
_lowercase : Union[str, Any] = [to_channel_dimension_format(_snake_case ,_snake_case ) for image in images]
_lowercase : List[Any] = {"pixel_values": images}
return BatchFeature(data=_snake_case ,tensor_type=_snake_case )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ):
_lowercase : str = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_snake_case ) != len(_snake_case ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(_snake_case ):
_lowercase : int = target_sizes.numpy()
_lowercase : Dict = []
for idx in range(len(_snake_case ) ):
_lowercase : Union[str, Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode="""bilinear""" ,align_corners=_snake_case )
_lowercase : int = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_snake_case )
else:
_lowercase : Optional[int] = logits.argmax(dim=1 )
_lowercase : Optional[int] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
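# Hedged sketch of the semantic-segmentation post-processing above (assumes
# torch is available; tensors are random placeholders): resize the class
# logits, then argmax over the class dimension to get a label map per image.
def _demo_post_process():
    logits = torch.randn(2, 5, 4, 4)  # batch of 2, 5 classes, 4x4 maps
    upsampled = torch.nn.functional.interpolate(logits, size=(8, 8), mode="bilinear", align_corners=False)
    return upsampled.argmax(dim=1)  # shape (2, 8, 8)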
| 351 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
UpperCAmelCase: List[str] = True
except (ImportError, ModuleNotFoundError):
UpperCAmelCase: int = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
re.sub("""<n>""" , """""" , __UpperCAmelCase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__UpperCAmelCase ) )
| 336 | 0 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase = 1000 ):
_lowercase : Any = 2**power
_lowercase : Optional[Any] = str(UpperCamelCase__ )
_lowercase : Union[str, Any] = list(UpperCamelCase__ )
_lowercase : Optional[int] = 0
for i in list_num:
sum_of_num += int(UpperCamelCase__ )
return sum_of_num
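# Worked check (the Project Euler 16 warm-up case): 2**15 = 32768 and
# 3 + 2 + 7 + 6 + 8 = 26.
assert sum(int(digit) for digit in str(2**15)) == 26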
if __name__ == "__main__":
UpperCAmelCase: str = int(input("""Enter the power of 2: """).strip())
print("""2 ^ """, power, """ = """, 2**power)
UpperCAmelCase: Any = solution(power)
print("""Sum of the digits is: """, result)
| 352 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=0.9_9_9 , __UpperCAmelCase="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(__UpperCAmelCase ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__UpperCAmelCase ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_lowercase : str = []
for i in range(__UpperCAmelCase ):
_lowercase : Any = i / num_diffusion_timesteps
_lowercase : int = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCAmelCase ) / alpha_bar_fn(__UpperCAmelCase ) , __UpperCAmelCase ) )
return torch.tensor(__UpperCAmelCase , dtype=torch.floataa )
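# Self-contained sketch of the "cosine" (squaredcos_cap_v2) schedule computed
# above: beta_i = 1 - alpha_bar((i+1)/T) / alpha_bar(i/T), clipped at max_beta.
def _demo_cosine_betas(num_steps=10, max_beta=0.999):
    def alpha_bar(t):
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    return torch.tensor(
        [min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta) for i in range(num_steps)],
        dtype=torch.float32,
    )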
class UpperCamelCase ( snake_case , snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = [e.name for e in KarrasDiffusionSchedulers]
SCREAMING_SNAKE_CASE_ : str = 2
@register_to_config
def __init__( self ,UpperCAmelCase_ = 10_00 ,UpperCAmelCase_ = 0.00085 ,UpperCAmelCase_ = 0.012 ,UpperCAmelCase_ = "linear" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = "epsilon" ,UpperCAmelCase_ = "linspace" ,UpperCAmelCase_ = 0 ,):
if trained_betas is not None:
_lowercase : str = torch.tensor(UpperCAmelCase_ ,dtype=torch.floataa )
elif beta_schedule == "linear":
_lowercase : Optional[Any] = torch.linspace(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowercase : Any = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,UpperCAmelCase_ ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowercase : Any = betas_for_alpha_bar(UpperCAmelCase_ )
else:
raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" )
_lowercase : Tuple = 1.0 - self.betas
_lowercase : Dict = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ):
if schedule_timesteps is None:
_lowercase : Optional[int] = self.timesteps
_lowercase : Union[str, Any] = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_lowercase : Optional[Any] = 1 if len(UpperCAmelCase_ ) > 1 else 0
else:
_lowercase : Dict = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_ ) else timestep
_lowercase : List[str] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCamelCase__ ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
_lowercase : str = self.index_for_timestep(UpperCAmelCase_ )
if self.state_in_first_order:
_lowercase : Optional[Any] = self.sigmas[step_index]
else:
_lowercase : Dict = self.sigmas_interpol[step_index]
_lowercase : Optional[int] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,):
_lowercase : List[str] = num_inference_steps
_lowercase : Dict = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_lowercase : Dict = np.linspace(0 ,num_train_timesteps - 1 ,UpperCAmelCase_ ,dtype=UpperCAmelCase_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_lowercase : Union[str, Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowercase : str = (np.arange(0 ,UpperCAmelCase_ ) * step_ratio).round()[::-1].copy().astype(UpperCAmelCase_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_lowercase : str = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowercase : Optional[int] = (np.arange(UpperCAmelCase_ ,0 ,-step_ratio )).round().copy().astype(UpperCAmelCase_ )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
_lowercase : Union[str, Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_lowercase : Optional[Any] = torch.from_numpy(np.log(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
_lowercase : List[str] = np.interp(UpperCAmelCase_ ,np.arange(0 ,len(UpperCAmelCase_ ) ) ,UpperCAmelCase_ )
_lowercase : int = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_lowercase : Any = torch.from_numpy(UpperCAmelCase_ ).to(device=UpperCAmelCase_ )
# interpolate sigmas
_lowercase : List[str] = sigmas.log().lerp(sigmas.roll(1 ).log() ,0.5 ).exp()
_lowercase : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_lowercase : Tuple = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(UpperCAmelCase_ ).startswith("""mps""" ):
# mps does not support float64
_lowercase : Tuple = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ ,dtype=torch.floataa )
else:
_lowercase : str = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ )
# interpolate timesteps
_lowercase : int = self.sigma_to_t(UpperCAmelCase_ ).to(UpperCAmelCase_ ,dtype=timesteps.dtype )
_lowercase : Union[str, Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) ,dim=-1 ).flatten()
_lowercase : str = torch.cat([timesteps[:1], interleaved_timesteps] )
_lowercase : List[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_lowercase : Optional[Any] = defaultdict(UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
# get log sigma
_lowercase : Optional[Any] = sigma.log()
# get distribution
_lowercase : Optional[int] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
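# the cumsum of the boolean mask is maximal from its last True entry onward, so
# argmax returns the last index with log_sigma >= log_sigmas[idx]; the clamp
# keeps high_idx = low_idx + 1 inside the table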
_lowercase : Tuple = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_lowercase : List[Any] = low_idx + 1
_lowercase : int = self.log_sigmas[low_idx]
_lowercase : Any = self.log_sigmas[high_idx]
# interpolate sigmas
_lowercase : Any = (low - log_sigma) / (low - high)
_lowercase : Dict = w.clamp(0 ,1 )
# transform interpolation to time range
_lowercase : List[str] = (1 - w) * low_idx + w * high_idx
_lowercase : Optional[int] = t.view(sigma.shape )
return t
@property
def lowerCamelCase__ ( self ):
return self.sample is None
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = True ,):
_lowercase : Optional[int] = self.index_for_timestep(UpperCAmelCase_ )
# advance index counter by 1
_lowercase : str = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_lowercase : Any = self.sigmas[step_index]
_lowercase : Any = self.sigmas_interpol[step_index + 1]
_lowercase : Tuple = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_lowercase : Union[str, Any] = self.sigmas[step_index - 1]
_lowercase : int = self.sigmas_interpol[step_index]
_lowercase : Tuple = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_lowercase : Any = 0
_lowercase : int = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_lowercase : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_interpol
_lowercase : Optional[Any] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_lowercase : str = sigma_hat if self.state_in_first_order else sigma_interpol
_lowercase : List[str] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_lowercase : List[str] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_lowercase : Any = sigma_interpol - sigma_hat
# store for 2nd order step
_lowercase : List[Any] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_lowercase : Optional[Any] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_lowercase : Optional[Any] = sigma_next - sigma_hat
_lowercase : Any = self.sample
_lowercase : Optional[int] = None
_lowercase : str = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_lowercase : int = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(UpperCAmelCase_ ):
# mps does not support float64
_lowercase : str = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
_lowercase : Any = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
_lowercase : List[Any] = self.timesteps.to(original_samples.device )
_lowercase : Union[str, Any] = timesteps.to(original_samples.device )
_lowercase : List[Any] = [self.index_for_timestep(UpperCAmelCase_ ,UpperCAmelCase_ ) for t in timesteps]
_lowercase : Optional[Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_lowercase : List[Any] = sigma.unsqueeze(-1 )
_lowercase : int = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
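# Illustrative usage sketch (an assumption, not part of this file): this scheduler
# mirrors diffusers' KDPM2DiscreteScheduler, which can be swapped into a pipeline:
#
#   from diffusers import DiffusionPipeline, KDPM2DiscreteScheduler
#   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe.scheduler = KDPM2DiscreteScheduler.from_config(pipe.scheduler.config)
#
# Each denoising step then alternates between the first-order state (sigma_hat)
# and the second-order state (sigma_interpol) implemented in the step method above.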
| 336 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase: List[Any] = logging.get_logger(__name__)
class UpperCamelCase ( _lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = ["pixel_values"]
def __init__( self ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = PILImageResampling.BILINEAR ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = True ,UpperCAmelCase_ = 1 / 2_55 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ )
_lowercase : List[Any] = size if size is not None else {"""shortest_edge""": 2_56}
_lowercase : Any = get_size_dict(UpperCAmelCase_ ,default_to_square=UpperCAmelCase_ )
_lowercase : Dict = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
_lowercase : Dict = get_size_dict(UpperCAmelCase_ )
_lowercase : List[str] = do_resize
_lowercase : List[Any] = size
_lowercase : Optional[int] = resample
_lowercase : Dict = do_center_crop
_lowercase : Dict = crop_size
_lowercase : Dict = do_rescale
_lowercase : Any = rescale_factor
_lowercase : Optional[int] = do_normalize
_lowercase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowercase : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = PILImageResampling.BICUBIC ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
_lowercase : str = get_size_dict(UpperCAmelCase_ ,default_to_square=UpperCAmelCase_ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
_lowercase : Any = get_resize_output_image_size(UpperCAmelCase_ ,size=size["""shortest_edge"""] ,default_to_square=UpperCAmelCase_ )
return resize(UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
_lowercase : List[Any] = get_size_dict(UpperCAmelCase_ )
return center_crop(UpperCAmelCase_ ,size=(size["""height"""], size["""width"""]) ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ):
return rescale(UpperCAmelCase_ ,scale=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
return normalize(UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = ChannelDimension.FIRST ,**UpperCAmelCase_ ,):
_lowercase : List[str] = do_resize if do_resize is not None else self.do_resize
_lowercase : Dict = size if size is not None else self.size
_lowercase : Union[str, Any] = get_size_dict(UpperCAmelCase_ ,default_to_square=UpperCAmelCase_ )
_lowercase : Optional[int] = resample if resample is not None else self.resample
_lowercase : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowercase : Tuple = crop_size if crop_size is not None else self.crop_size
_lowercase : Any = get_size_dict(UpperCAmelCase_ )
_lowercase : Tuple = do_rescale if do_rescale is not None else self.do_rescale
_lowercase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
_lowercase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
_lowercase : Optional[int] = image_std if image_std is not None else self.image_std
_lowercase : Any = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_lowercase : Optional[int] = [to_numpy_array(UpperCAmelCase_ ) for image in images]
if do_resize:
_lowercase : List[str] = [self.resize(image=UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ) for image in images]
if do_center_crop:
_lowercase : Any = [self.center_crop(image=UpperCAmelCase_ ,size=UpperCAmelCase_ ) for image in images]
if do_rescale:
_lowercase : List[Any] = [self.rescale(image=UpperCAmelCase_ ,scale=UpperCAmelCase_ ) for image in images]
if do_normalize:
_lowercase : Union[str, Any] = [self.normalize(image=UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ) for image in images]
_lowercase : Optional[Any] = [to_channel_dimension_format(UpperCAmelCase_ ,UpperCAmelCase_ ) for image in images]
_lowercase : Dict = {"""pixel_values""": images}
return BatchFeature(data=UpperCAmelCase_ ,tensor_type=UpperCAmelCase_ )
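# Illustrative usage sketch (the class name and input file below are placeholders,
# not defined in this file). A processor with this standard interface is called as:
#
#   from PIL import Image
#   processor = SomeImageProcessor(size={"shortest_edge": 256},
#                                  crop_size={"height": 224, "width": 224})
#   inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#   # inputs["pixel_values"] has shape (1, 3, 224, 224)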
| 353 |
"""simple docstring"""
import pprint
import requests
UpperCAmelCase: Tuple = """https://zenquotes.io/api"""
def __SCREAMING_SNAKE_CASE ( ):
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def __SCREAMING_SNAKE_CASE ( ):
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
UpperCAmelCase: int = random_quotes()
pprint.pprint(response)
| 336 | 0 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCamelCase ( datasets.BuilderConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = None
class UpperCamelCase ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = PandasConfig
def lowerCamelCase__ ( self ):
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_lowercase : Tuple = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCAmelCase_ ,(str, list, tuple) ):
_lowercase : List[Any] = data_files
if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Optional[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowercase : str = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={"""files""": files} )]
_lowercase : Optional[int] = []
for split_name, files in data_files.items():
if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Optional[int] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowercase : Tuple = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCAmelCase_ ,gen_kwargs={"""files""": files} ) )
return splits
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowercase : List[str] = table_cast(UpperCAmelCase_ ,self.config.features.arrow_schema )
return pa_table
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
for i, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase_ ) ):
with open(UpperCAmelCase_ ,"""rb""" ) as f:
_lowercase : str = pa.Table.from_pandas(pd.read_pickle(UpperCAmelCase_ ) )
yield i, self._cast_table(UpperCAmelCase_ )
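# Illustrative usage sketch (assuming this is the packaged `pandas` builder of the
# `datasets` library): pickled DataFrames can then be loaded with
#
#   from datasets import load_dataset
#   ds = load_dataset("pandas", data_files="data.pkl")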
| 354 |
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str
SCREAMING_SNAKE_CASE_ : int
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError("""The parameter s type must be str.""" )
return [s[i:] + s[:i] for i in range(len(__UpperCAmelCase ) )]
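# For example, all_rotations("abc") (the de-obfuscated name of the function above,
# as used in its call site below) returns ["abc", "bca", "cab"]: rotation i moves
# the first i characters to the end of the string.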
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError("""The parameter s type must be str.""" )
if not s:
raise ValueError("""The parameter s must not be empty.""" )
_lowercase : Tuple = all_rotations(__UpperCAmelCase )
rotations.sort() # sort the list of rotations in alphabetical order
# make a string composed of the last char of each rotation
_lowercase : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(__UpperCAmelCase ),
}
return response
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError("""The parameter bwt_string type must be str.""" )
if not bwt_string:
raise ValueError("""The parameter bwt_string must not be empty.""" )
try:
_lowercase : Optional[Any] = int(__UpperCAmelCase )
except ValueError:
raise TypeError(
"""The parameter idx_original_string type must be int or passive"""
""" of cast to int.""" )
if idx_original_string < 0:
raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
if idx_original_string >= len(__UpperCAmelCase ):
raise ValueError(
"""The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )
_lowercase : int = [""""""] * len(__UpperCAmelCase )
for _ in range(len(__UpperCAmelCase ) ):
for i in range(len(__UpperCAmelCase ) ):
_lowercase : Union[str, Any] = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
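# Worked example (a sketch, values checked by hand): for s = "banana" the sorted
# rotations are ["abanan", "anaban", "ananab", "banana", "nabana", "nanaba"], so
# bwt_transform("banana") yields {"bwt_string": "nnbaaa", "idx_original_string": 3}
# and reverse_bwt("nnbaaa", 3) reconstructs "banana".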
if __name__ == "__main__":
UpperCAmelCase: Optional[int] = """Provide a string that I will generate its BWT transform: """
UpperCAmelCase: int = input(entry_msg).strip()
UpperCAmelCase: List[str] = bwt_transform(s)
print(
F'Burrows Wheeler transform for string \'{s}\' results '
F'in \'{result["bwt_string"]}\''
)
UpperCAmelCase: Union[str, Any] = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
F'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
F'we get original string \'{original_string}\''
)
| 336 | 0 |
"""simple docstring"""
from manim import *
class UpperCamelCase ( snake_case ):
"""simple docstring"""
def lowerCamelCase__ ( self ):
_lowercase : Any = Rectangle(height=0.5 ,width=0.5 )
_lowercase : Union[str, Any] = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
_lowercase : Union[str, Any] = [mem.copy() for i in range(6 )]
_lowercase : Union[str, Any] = [mem.copy() for i in range(6 )]
_lowercase : int = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ ,buff=0 )
_lowercase : Dict = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ ,buff=0 )
_lowercase : List[str] = VGroup(lowerCamelCase_ ,lowerCamelCase_ ).arrange(lowerCamelCase_ ,buff=0 )
_lowercase : List[str] = Text("""CPU""" ,font_size=24 )
_lowercase : Optional[Any] = Group(lowerCamelCase_ ,lowerCamelCase_ ).arrange(lowerCamelCase_ ,buff=0.5 ,aligned_edge=lowerCamelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase_ )
_lowercase : List[str] = [mem.copy() for i in range(1 )]
_lowercase : int = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ ,buff=0 )
_lowercase : List[str] = Text("""GPU""" ,font_size=24 )
_lowercase : str = Group(lowerCamelCase_ ,lowerCamelCase_ ).arrange(lowerCamelCase_ ,buff=0.5 ,aligned_edge=lowerCamelCase_ )
gpu.align_to(lowerCamelCase_ ,lowerCamelCase_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(lowerCamelCase_ )
_lowercase : List[str] = [mem.copy() for i in range(6 )]
_lowercase : List[str] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ ,buff=0 )
_lowercase : List[Any] = Text("""Model""" ,font_size=24 )
_lowercase : List[str] = Group(lowerCamelCase_ ,lowerCamelCase_ ).arrange(lowerCamelCase_ ,buff=0.5 ,aligned_edge=lowerCamelCase_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(lowerCamelCase_ ,run_time=1 ) ,Create(lowerCamelCase_ ,run_time=1 ) ,Create(lowerCamelCase_ ,run_time=1 ) ,)
_lowercase : int = MarkupText(
f"""First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.""" ,font_size=24 ,)
_lowercase : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowercase : List[str] = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model""" ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ ,run_time=2.5 ) ,Write(lowerCamelCase_ ) ,Write(lowerCamelCase_ ) )
self.add(lowerCamelCase_ )
_lowercase : Any = []
_lowercase : int = []
_lowercase : int = []
for i, rect in enumerate(lowerCamelCase_ ):
_lowercase : List[Any] = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase_ ,opacity=0.7 )
cpu_target.move_to(lowerCamelCase_ )
cpu_target.generate_target()
_lowercase : List[Any] = 0.46 / 4
_lowercase : Optional[Any] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=lowerCamelCase_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target ,direction=lowerCamelCase_ ,buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=lowerCamelCase_ ,buff=0.0 )
cpu_targs.append(lowerCamelCase_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCamelCase_ ) )
second_animations.append(MoveToTarget(lowerCamelCase_ ,run_time=1.5 ) )
self.play(*lowerCamelCase_ )
self.play(*lowerCamelCase_ )
self.wait()
| 355 |
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def __SCREAMING_SNAKE_CASE ( ):
_lowercase : Dict = [randint(-1000 , 1000 ) for i in range(10 )]
_lowercase : Tuple = randint(-5000 , 5000 )
return (arr, r)
UpperCAmelCase: int = make_dataset()
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
for triplet in permutations(__UpperCAmelCase , 3 ):
if sum(__UpperCAmelCase ) == target:
return tuple(sorted(__UpperCAmelCase ) )
return (0, 0, 0)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
arr.sort()
_lowercase : Optional[Any] = len(__UpperCAmelCase )
for i in range(n - 1 ):
_lowercase , _lowercase : str = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def __SCREAMING_SNAKE_CASE ( ):
_lowercase : Tuple = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
_lowercase : Union[str, Any] = """
triplet_sum1(*dataset)
"""
_lowercase : Union[str, Any] = """
triplet_sum2(*dataset)
"""
_lowercase : Dict = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 )
_lowercase : Any = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 )
return (min(__UpperCAmelCase ), min(__UpperCAmelCase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase: Any = solution_times()
print(F'The time for naive implementation is {times[0]}.')
print(F'The time for optimized implementation is {times[1]}.')
| 336 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase: Optional[int] = logging.get_logger(__name__)
UpperCAmelCase: List[Any] = {
'sail/poolformer_s12': 'https://huggingface.co/sail/poolformer_s12/resolve/main/config.json',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class UpperCamelCase ( _lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = "poolformer"
def __init__( self ,UpperCAmelCase_=3 ,UpperCAmelCase_=16 ,UpperCAmelCase_=16 ,UpperCAmelCase_=3 ,UpperCAmelCase_=4.0 ,UpperCAmelCase_=[2, 2, 6, 2] ,UpperCAmelCase_=[64, 1_28, 3_20, 5_12] ,UpperCAmelCase_=[7, 3, 3, 3] ,UpperCAmelCase_=[4, 2, 2, 2] ,UpperCAmelCase_=[2, 1, 1, 1] ,UpperCAmelCase_=4 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=True ,UpperCAmelCase_=1E-5 ,UpperCAmelCase_=0.02 ,**UpperCAmelCase_ ,):
_lowercase : Tuple = num_channels
_lowercase : int = patch_size
_lowercase : Union[str, Any] = stride
_lowercase : Dict = padding
_lowercase : str = pool_size
_lowercase : List[str] = hidden_sizes
_lowercase : Tuple = mlp_ratio
_lowercase : Tuple = depths
_lowercase : List[str] = patch_sizes
_lowercase : Tuple = strides
_lowercase : Tuple = num_encoder_blocks
_lowercase : List[Any] = drop_path_rate
_lowercase : Optional[int] = hidden_act
_lowercase : List[str] = use_layer_scale
_lowercase : Optional[Any] = layer_scale_init_value
_lowercase : Dict = initializer_range
super().__init__(**lowercase_ )
class UpperCamelCase ( _lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = version.parse("1.11" )
@property
def lowerCamelCase__ ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCamelCase__ ( self ):
return 2E-3
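# Illustrative export sketch (an assumption about the surrounding library, not part
# of this file): an OnnxConfig like the one above is typically exercised through the
# transformers ONNX export CLI, e.g.
#
#   python -m transformers.onnx --model=sail/poolformer_s12 onnx/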
| 356 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ["image_processor", "tokenizer"]
SCREAMING_SNAKE_CASE_ : Optional[int] = "BlipImageProcessor"
SCREAMING_SNAKE_CASE_ : Tuple = "AutoTokenizer"
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
super().__init__(UpperCAmelCase_ ,UpperCAmelCase_ )
# add QFormer tokenizer
_lowercase : Optional[int] = qformer_tokenizer
def __call__( self ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = True ,UpperCAmelCase_ = False ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = 0 ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
if images is None and text is None:
raise ValueError("""You have to specify at least images or text.""" )
_lowercase : List[Any] = BatchFeature()
if text is not None:
_lowercase : List[str] = self.tokenizer(
text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
encoding.update(UpperCAmelCase_ )
_lowercase : Dict = self.qformer_tokenizer(
text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
_lowercase : str = qformer_text_encoding.pop("""input_ids""" )
_lowercase : int = qformer_text_encoding.pop("""attention_mask""" )
if images is not None:
_lowercase : Optional[Any] = self.image_processor(UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ )
encoding.update(UpperCAmelCase_ )
return encoding
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return self.tokenizer.batch_decode(*UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return self.tokenizer.decode(*UpperCAmelCase_ ,**UpperCAmelCase_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.tokenizer.model_input_names
_lowercase : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
if os.path.isfile(UpperCAmelCase_ ):
raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(UpperCAmelCase_ ,exist_ok=UpperCAmelCase_ )
_lowercase : Any = os.path.join(UpperCAmelCase_ ,"""qformer_tokenizer""" )
self.qformer_tokenizer.save_pretrained(UpperCAmelCase_ )
return super().save_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
_lowercase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCAmelCase_ ,subfolder="""qformer_tokenizer""" )
_lowercase : Any = cls._get_arguments_from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
args.append(UpperCAmelCase_ )
return cls(*UpperCAmelCase_ )
| 336 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class UpperCamelCase ( A__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = 'gpt_neox'
def __init__( self ,UpperCAmelCase_=5_04_32 ,UpperCAmelCase_=61_44 ,UpperCAmelCase_=44 ,UpperCAmelCase_=64 ,UpperCAmelCase_=2_45_76 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.25 ,UpperCAmelCase_=1_00_00 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=20_48 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-5 ,UpperCAmelCase_=True ,UpperCAmelCase_=0 ,UpperCAmelCase_=2 ,UpperCAmelCase_=False ,UpperCAmelCase_=True ,UpperCAmelCase_=None ,**UpperCAmelCase_ ,):
super().__init__(bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,**lowerCamelCase__ )
_lowercase : int = vocab_size
_lowercase : Optional[int] = max_position_embeddings
_lowercase : List[Any] = hidden_size
_lowercase : Dict = num_hidden_layers
_lowercase : Optional[Any] = num_attention_heads
_lowercase : int = intermediate_size
_lowercase : Union[str, Any] = hidden_act
_lowercase : Optional[int] = rotary_pct
_lowercase : Dict = rotary_emb_base
_lowercase : int = attention_dropout
_lowercase : Tuple = hidden_dropout
_lowercase : str = classifier_dropout
_lowercase : Tuple = initializer_range
_lowercase : Optional[Any] = layer_norm_eps
_lowercase : Tuple = use_cache
_lowercase : Union[str, Any] = tie_word_embeddings
_lowercase : List[Any] = use_parallel_residual
_lowercase : Any = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"""The hidden size is not divisble by the number of attention heads! Make sure to update them!""" )
def lowerCamelCase__ ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,lowerCamelCase__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f"""got {self.rope_scaling}""" )
_lowercase : List[Any] = self.rope_scaling.get("""type""" ,lowerCamelCase__ )
_lowercase : Any = self.rope_scaling.get("""factor""" ,lowerCamelCase__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(lowerCamelCase__ ,lowerCamelCase__ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 357 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase: Tuple = logging.get_logger(__name__)
UpperCAmelCase: List[Any] = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = "trajectory_transformer"
SCREAMING_SNAKE_CASE_ : Optional[int] = ["past_key_values"]
SCREAMING_SNAKE_CASE_ : Tuple = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self ,UpperCAmelCase_=1_00 ,UpperCAmelCase_=5 ,UpperCAmelCase_=1 ,UpperCAmelCase_=1 ,UpperCAmelCase_=2_49 ,UpperCAmelCase_=6 ,UpperCAmelCase_=17 ,UpperCAmelCase_=25 ,UpperCAmelCase_=4 ,UpperCAmelCase_=4 ,UpperCAmelCase_=1_28 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.0006 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=1 ,UpperCAmelCase_=True ,UpperCAmelCase_=1 ,UpperCAmelCase_=5_02_56 ,UpperCAmelCase_=5_02_56 ,**UpperCAmelCase_ ,):
_lowercase : Dict = vocab_size
_lowercase : List[str] = action_weight
_lowercase : int = reward_weight
_lowercase : List[Any] = value_weight
_lowercase : List[str] = max_position_embeddings
_lowercase : Any = block_size
_lowercase : Any = action_dim
_lowercase : List[str] = observation_dim
_lowercase : Union[str, Any] = transition_dim
_lowercase : str = learning_rate
_lowercase : Tuple = n_layer
_lowercase : Optional[int] = n_head
_lowercase : List[str] = n_embd
_lowercase : List[str] = embd_pdrop
_lowercase : Optional[Any] = attn_pdrop
_lowercase : List[Any] = resid_pdrop
_lowercase : str = initializer_range
_lowercase : Optional[Any] = layer_norm_eps
_lowercase : List[Any] = kaiming_initializer_range
_lowercase : List[Any] = use_cache
super().__init__(pad_token_id=UpperCAmelCase_ ,bos_token_id=UpperCAmelCase_ ,eos_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ )
| 336 | 0 |
class UpperCamelCase :
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ):
_lowercase : int = len(lowercase_ )
_lowercase : Dict = [0] * len_array
if len_array > 0:
_lowercase : str = array[0]
for i in range(1 ,lowercase_ ):
_lowercase : List[Any] = self.prefix_sum[i - 1] + array[i]
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : Tuple = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(lowercase_ )
return False
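# Illustrative semantics (method names are obfuscated above, so this is a sketch):
# for array = [1, 2, 3, 4] the prefix sums are [1, 3, 6, 10]; the range query
# (start=1, end=3) returns prefix_sum[3] - prefix_sum[0] = 9, and the subarray-sum
# check for target 7 succeeds because prefix 10 minus the earlier prefix 3 is 7
# (the subarray [3, 4]).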
if __name__ == "__main__":
import doctest
doctest.testmod()
| 358 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase: Any = logging.get_logger(__name__)
UpperCAmelCase: List[str] = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = "instructblip_vision_model"
def __init__( self ,UpperCAmelCase_=14_08 ,UpperCAmelCase_=61_44 ,UpperCAmelCase_=39 ,UpperCAmelCase_=16 ,UpperCAmelCase_=2_24 ,UpperCAmelCase_=14 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=1E-6 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=1E-10 ,UpperCAmelCase_=True ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ )
_lowercase : Optional[Any] = hidden_size
_lowercase : Tuple = intermediate_size
_lowercase : List[Any] = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[Any] = patch_size
_lowercase : Optional[Any] = image_size
_lowercase : Union[str, Any] = initializer_range
_lowercase : Optional[Any] = attention_dropout
_lowercase : List[Any] = layer_norm_eps
_lowercase : Optional[int] = hidden_act
_lowercase : Tuple = qkv_bias
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
_lowercase , _lowercase : List[Any] = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
_lowercase : int = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "instructblip_qformer"
def __init__( self ,UpperCAmelCase_=3_05_22 ,UpperCAmelCase_=7_68 ,UpperCAmelCase_=12 ,UpperCAmelCase_=12 ,UpperCAmelCase_=30_72 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=0 ,UpperCAmelCase_="absolute" ,UpperCAmelCase_=2 ,UpperCAmelCase_=14_08 ,**UpperCAmelCase_ ,):
super().__init__(pad_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : List[Any] = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : List[str] = num_attention_heads
_lowercase : Optional[Any] = hidden_act
_lowercase : int = intermediate_size
_lowercase : Union[str, Any] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : List[Any] = max_position_embeddings
_lowercase : Tuple = initializer_range
_lowercase : Optional[int] = layer_norm_eps
_lowercase : Any = position_embedding_type
_lowercase : Dict = cross_attention_frequency
_lowercase : Optional[Any] = encoder_hidden_size
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
_lowercase , _lowercase : Dict = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
_lowercase : str = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = "instructblip"
SCREAMING_SNAKE_CASE_ : List[str] = True
def __init__( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=32 ,**UpperCAmelCase_ ):
super().__init__(**UpperCAmelCase_ )
if vision_config is None:
_lowercase : str = {}
logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" )
if qformer_config is None:
_lowercase : Any = {}
logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" )
if text_config is None:
_lowercase : Optional[int] = {}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
_lowercase : int = InstructBlipVisionConfig(**UpperCAmelCase_ )
_lowercase : Optional[int] = InstructBlipQFormerConfig(**UpperCAmelCase_ )
_lowercase : Dict = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
_lowercase : str = CONFIG_MAPPING[text_model_type](**UpperCAmelCase_ )
_lowercase : str = self.text_config.tie_word_embeddings
_lowercase : Union[str, Any] = self.text_config.is_encoder_decoder
_lowercase : List[str] = num_query_tokens
_lowercase : List[str] = self.vision_config.hidden_size
_lowercase : Dict = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowercase : Union[str, Any] = 1.0
_lowercase : Dict = 0.02
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ,):
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**UpperCAmelCase_ ,)
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = copy.deepcopy(self.__dict__ )
_lowercase : int = self.vision_config.to_dict()
_lowercase : Any = self.qformer_config.to_dict()
_lowercase : Any = self.text_config.to_dict()
_lowercase : Optional[int] = self.__class__.model_type
return output
| 336 | 0 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class UpperCamelCase :
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=13 ,UpperCAmelCase_=7 ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_=False ,UpperCAmelCase_=True ,UpperCAmelCase_=99 ,UpperCAmelCase_=32 ,UpperCAmelCase_=5 ,UpperCAmelCase_=4 ,UpperCAmelCase_=37 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=16 ,UpperCAmelCase_=2 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=3 ,UpperCAmelCase_=4 ,UpperCAmelCase_=None ,):
_lowercase : int = parent
_lowercase : Any = batch_size
_lowercase : Optional[Any] = seq_length
_lowercase : Tuple = is_training
_lowercase : List[Any] = use_input_mask
_lowercase : int = use_token_type_ids
_lowercase : Optional[Any] = use_labels
_lowercase : Any = vocab_size
_lowercase : str = hidden_size
_lowercase : int = num_hidden_layers
_lowercase : str = num_attention_heads
_lowercase : Optional[Any] = intermediate_size
_lowercase : str = hidden_act
_lowercase : int = hidden_dropout_prob
_lowercase : List[Any] = attention_probs_dropout_prob
_lowercase : int = max_position_embeddings
_lowercase : Any = type_vocab_size
_lowercase : Any = type_sequence_label_size
_lowercase : str = initializer_range
_lowercase : List[str] = num_labels
_lowercase : int = num_choices
_lowercase : Optional[int] = scope
def lowerCamelCase__ ( self ):
_lowercase : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_lowercase : Union[str, Any] = None
if self.use_input_mask:
_lowercase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : Optional[int] = None
if self.use_token_type_ids:
_lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
_lowercase : List[Any] = None
_lowercase : Optional[int] = None
_lowercase : str = None
if self.use_labels:
_lowercase : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowercase : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_lowercase : Dict = ids_tensor([self.batch_size] ,self.num_choices )
_lowercase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self ):
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__A ,initializer_range=self.initializer_range ,)
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Optional[Any] = LlamaModel(config=__A )
model.to(__A )
model.eval()
_lowercase : Optional[int] = model(__A ,attention_mask=__A )
_lowercase : Any = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
_lowercase : List[str] = True
_lowercase : str = LlamaModel(__A )
model.to(__A )
model.eval()
_lowercase : int = model(
__A ,attention_mask=__A ,encoder_hidden_states=__A ,encoder_attention_mask=__A ,)
_lowercase : List[Any] = model(
__A ,attention_mask=__A ,encoder_hidden_states=__A ,)
_lowercase : List[str] = model(__A ,attention_mask=__A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
_lowercase : int = LlamaForCausalLM(config=__A )
model.to(__A )
model.eval()
_lowercase : Tuple = model(__A ,attention_mask=__A ,labels=__A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
_lowercase : int = True
_lowercase : List[Any] = True
_lowercase : Tuple = LlamaForCausalLM(config=__A )
model.to(__A )
model.eval()
# first forward pass
_lowercase : Union[str, Any] = model(
__A ,attention_mask=__A ,encoder_hidden_states=__A ,encoder_attention_mask=__A ,use_cache=__A ,)
_lowercase : Optional[Any] = outputs.past_key_values
# create hypothetical next tokens and extend next_input_ids with them
_lowercase : int = ids_tensor((self.batch_size, 3) ,config.vocab_size )
_lowercase : Any = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
_lowercase : List[str] = torch.cat([input_ids, next_tokens] ,dim=-1 )
_lowercase : Union[str, Any] = torch.cat([input_mask, next_mask] ,dim=-1 )
_lowercase : List[Any] = model(
__A ,attention_mask=__A ,encoder_hidden_states=__A ,encoder_attention_mask=__A ,output_hidden_states=__A ,)["""hidden_states"""][0]
_lowercase : List[str] = model(
__A ,attention_mask=__A ,encoder_hidden_states=__A ,encoder_attention_mask=__A ,past_key_values=__A ,output_hidden_states=__A ,)["""hidden_states"""][0]
# select random slice
_lowercase : Union[str, Any] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
_lowercase : int = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowercase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A ,__A ,atol=1E-3 ) )
def lowerCamelCase__ ( self ):
_lowercase : List[Any] = self.prepare_config_and_inputs()
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) : int = config_and_inputs
_lowercase : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( snake_case , snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : List[str] = (LlamaForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : Optional[int] = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : Any = False
def lowerCamelCase__ ( self ):
_lowercase : Tuple = LlamaModelTester(self )
_lowercase : str = ConfigTester(self ,config_class=__A ,hidden_size=37 )
def lowerCamelCase__ ( self ):
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self ):
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase__ ( self ):
_lowercase : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowercase : Any = type
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase__ ( self ):
_lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : Optional[Any] = 3
_lowercase : List[Any] = input_dict["""input_ids"""]
_lowercase : Union[str, Any] = input_ids.ne(1 ).to(__A )
_lowercase : Optional[Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
_lowercase : Union[str, Any] = LlamaForSequenceClassification(__A )
model.to(__A )
model.eval()
_lowercase : Any = model(__A ,attention_mask=__A ,labels=__A )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase__ ( self ):
_lowercase , _lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : Dict = 3
_lowercase : List[Any] = """single_label_classification"""
_lowercase : str = input_dict["""input_ids"""]
_lowercase : Union[str, Any] = input_ids.ne(1 ).to(__A )
_lowercase : Union[str, Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
_lowercase : Optional[int] = LlamaForSequenceClassification(__A )
model.to(__A )
model.eval()
_lowercase : Dict = model(__A ,attention_mask=__A ,labels=__A )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase__ ( self ):
_lowercase , _lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : Union[str, Any] = 3
_lowercase : Optional[Any] = """multi_label_classification"""
_lowercase : Optional[int] = input_dict["""input_ids"""]
_lowercase : List[str] = input_ids.ne(1 ).to(__A )
_lowercase : str = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
_lowercase : Tuple = LlamaForSequenceClassification(__A )
model.to(__A )
model.eval()
_lowercase : Dict = model(__A ,attention_mask=__A ,labels=__A )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
def lowerCamelCase__ ( self ):
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase , _lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : List[str] = ids_tensor([1, 10] ,config.vocab_size )
_lowercase : List[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_lowercase : Optional[int] = LlamaModel(__A )
original_model.to(__A )
original_model.eval()
_lowercase : int = original_model(__A ).last_hidden_state
_lowercase : Tuple = original_model(__A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_lowercase : Tuple = {"""type""": scaling_type, """factor""": 10.0}
_lowercase : int = LlamaModel(__A )
scaled_model.to(__A )
scaled_model.eval()
_lowercase : List[Any] = scaled_model(__A ).last_hidden_state
_lowercase : Optional[Any] = scaled_model(__A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__A ,__A ,atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__A ,__A ,atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__A ,__A ,atol=1E-5 ) )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def lowerCamelCase__ ( self ):
_lowercase : List[str] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_lowercase : Dict = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" ,device_map="""auto""" )
_lowercase : Optional[Any] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
_lowercase : Union[str, Any] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,__A ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_lowercase : Optional[int] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,__A ,atol=1E-5 ,rtol=1E-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def lowerCamelCase__ ( self ):
_lowercase : Optional[int] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_lowercase : Any = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" ,device_map="""auto""" )
_lowercase : Union[str, Any] = model(torch.tensor(__A ) )
# Expected mean on dim = -1
_lowercase : Union[str, Any] = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,__A ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_lowercase : Tuple = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,__A ,atol=1E-5 ,rtol=1E-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_lowercase : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""auto""" )
_lowercase : int = model(torch.tensor(__A ) )
# Expected mean on dim = -1
_lowercase : Tuple = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,__A ,atol=1E-2 ,rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_lowercase : List[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,__A ,atol=1E-2 ,rtol=1E-2 )
@unittest.skip(
"""Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" )
@slow
def lowerCamelCase__ ( self ):
_lowercase : Dict = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_lowercase : List[Any] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" ,device_map="""auto""" )
_lowercase : Optional[Any] = model(torch.tensor(__A ) )
_lowercase : Dict = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,__A ,atol=1E-2 ,rtol=1E-2 )
# fmt: off
_lowercase : Optional[Any] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,__A ,atol=1E-5 ,rtol=1E-5 )
@unittest.skip("""Model is curently gated""" )
@slow
def lowerCamelCase__ ( self ):
_lowercase : Any = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
_lowercase : int = """Simply put, the theory of relativity states that """
_lowercase : List[str] = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
_lowercase : Union[str, Any] = tokenizer.encode(__A ,return_tensors="""pt""" )
_lowercase : List[str] = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""sequential""" ,use_safetensors=__A )
# greedy generation outputs
_lowercase : Optional[int] = model.generate(__A ,max_new_tokens=64 ,top_p=__A ,temperature=1 ,do_sample=__A )
_lowercase : List[str] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=__A )
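# With greedy decoding the completion is deterministic, so the decoded text can be compared verbatim.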
self.assertEqual(__A ,__A )
| 359 |
"""simple docstring"""
import cva
import numpy as np
class UpperCamelCase :
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
if k in (0.04, 0.06):
_lowercase : Optional[Any] = k
_lowercase : Optional[Any] = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self ):
return str(self.k )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : List[str] = cva.imread(UpperCAmelCase_ ,0 )
_lowercase , _lowercase : Dict = img.shape
_lowercase : list[list[int]] = []
_lowercase : int = img.copy()
_lowercase : List[str] = cva.cvtColor(UpperCAmelCase_ ,cva.COLOR_GRAY2RGB )
_lowercase , _lowercase : Optional[Any] = np.gradient(UpperCAmelCase_ )
_lowercase : Optional[int] = dx**2
_lowercase : Optional[Any] = dy**2
_lowercase : Optional[Any] = dx * dy
_lowercase : List[str] = 0.04
_lowercase : Optional[Any] = self.window_size // 2
for y in range(UpperCAmelCase_ ,h - offset ):
for x in range(UpperCAmelCase_ ,w - offset ):
_lowercase : Optional[Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : Dict = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : Union[str, Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : int = (wxx * wyy) - (wxy**2)
_lowercase : Union[str, Any] = wxx + wyy
_lowercase : Union[str, Any] = det - k * (trace**2)
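# Harris response for this window: R = det(M) - k * trace(M)**2, where M is the summed structure tensor; large positive R marks a corner.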
# Response threshold; raise it to keep only the strongest corners, lower it to detect more.
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) ,0 )
color_img.itemset((y, x, 1) ,0 )
color_img.itemset((y, x, 2) ,2_55 )
return color_img, corner_list
if __name__ == "__main__":
UpperCAmelCase: Optional[int] = HarrisCorner(0.04, 3)
UpperCAmelCase , UpperCAmelCase: List[Any] = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
| 336 | 0 |
"""simple docstring"""
import os
from collections.abc import Iterator
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase = "." ):
for dir_path, dir_names, filenames in os.walk(__UpperCAmelCase ):
_lowercase : List[Any] = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(__UpperCAmelCase )[1] in (".py", ".ipynb"):
yield os.path.join(__UpperCAmelCase , __UpperCAmelCase ).lstrip("""./""" )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
return F"""{i * " "}*""" if i else "\n##"
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : List[str] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(__UpperCAmelCase ) or old_parts[i] != new_part) and new_part:
print(F"""{md_prefix(__UpperCAmelCase )} {new_part.replace("_" , " " ).title()}""" )
return new_path
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase = "." ):
_lowercase : Union[str, Any] = """"""
for filepath in sorted(good_file_paths(__UpperCAmelCase ) ):
_lowercase : Any = os.path.split(__UpperCAmelCase )
if filepath != old_path:
_lowercase : Optional[int] = print_path(__UpperCAmelCase , __UpperCAmelCase )
_lowercase : str = (filepath.count(os.sep ) + 1) if filepath else 0
_lowercase : Optional[int] = F"""{filepath}/{filename}""".replace(""" """ , """%20""" )
_lowercase : Optional[int] = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
print(F"""{md_prefix(__UpperCAmelCase )} [{filename}]({url})""" )
if __name__ == "__main__":
print_directory_md(""".""")
| 360 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase ( snake_case , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = LEDTokenizer
SCREAMING_SNAKE_CASE_ : List[str] = LEDTokenizerFast
SCREAMING_SNAKE_CASE_ : List[str] = True
def lowerCamelCase__ ( self ):
super().setUp()
_lowercase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_lowercase : List[Any] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) )
_lowercase : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowercase : Dict = {"""unk_token""": """<unk>"""}
_lowercase : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
_lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return "lower newer", "lower newer"
@cached_property
def lowerCamelCase__ ( self ):
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def lowerCamelCase__ ( self ):
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_lowercase : Any = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Tuple = tokenizer(UpperCAmelCase_ ,max_length=len(UpperCAmelCase_ ) ,padding=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
_lowercase : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Dict = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIn("""input_ids""" ,UpperCAmelCase_ )
self.assertIn("""attention_mask""" ,UpperCAmelCase_ )
self.assertNotIn("""labels""" ,UpperCAmelCase_ )
self.assertNotIn("""decoder_attention_mask""" ,UpperCAmelCase_ )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Dict = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Tuple = tokenizer(text_target=UpperCAmelCase_ ,max_length=32 ,padding="""max_length""" ,return_tensors="""pt""" )
self.assertEqual(32 ,targets["""input_ids"""].shape[1] )
@require_torch
def lowerCamelCase__ ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : List[Any] = tokenizer(
["""I am a small frog""" * 10_24, """I am a small frog"""] ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual(batch.input_ids.shape ,(2, 51_22) )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : List[Any] = ["""A long paragraph for summarization."""]
_lowercase : Dict = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Dict = tokenizer(UpperCAmelCase_ ,return_tensors="""pt""" )
_lowercase : List[str] = tokenizer(text_target=UpperCAmelCase_ ,return_tensors="""pt""" )
_lowercase : Union[str, Any] = inputs["""input_ids"""]
_lowercase : List[str] = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCamelCase__ ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : str = ["""Summary of the text.""", """Another summary."""]
_lowercase : Optional[int] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_lowercase : Any = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ )
_lowercase : str = [[0] * len(UpperCAmelCase_ ) for x in encoded_output["""input_ids"""]]
_lowercase : Optional[int] = tokenizer.pad(UpperCAmelCase_ )
self.assertSequenceEqual(outputs["""global_attention_mask"""] ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
pass
def lowerCamelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowercase : int = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Optional[int] = self.tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Dict = """A, <mask> AllenNLP sentence."""
_lowercase : List[Any] = tokenizer_r.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ )
_lowercase : Any = tokenizer_p.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) ,sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) ,sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) ,)
_lowercase : str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
_lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 336 | 0 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __SCREAMING_SNAKE_CASE ( ):
raise RuntimeError("""CUDA out of memory.""" )
class UpperCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self ):
super().__init__()
_lowercase : Optional[Any] = nn.Linear(3 ,4 )
_lowercase : List[Any] = nn.BatchNormad(4 )
_lowercase : List[str] = nn.Linear(4 ,5 )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return self.lineara(self.batchnorm(self.lineara(lowerCamelCase_ ) ) )
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ):
_lowercase : Any = []
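# find_executable_batch_size halves the batch size after each simulated OOM, so the attempts should be 128, 64, 32, 16, 8.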
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(UpperCAmelCase_ ):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_ )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowerCamelCase_ ,[1_28, 64, 32, 16, 8] )
def lowerCamelCase__ ( self ):
_lowercase : List[str] = []
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(UpperCAmelCase_ ,UpperCAmelCase_ ):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_ )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
_lowercase , _lowercase : Union[str, Any] = mock_training_loop_function("""hello""" )
self.assertListEqual(lowerCamelCase_ ,[1_28, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] ,[8, """hello"""] )
def lowerCamelCase__ ( self ):
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(UpperCAmelCase_ ):
pass
with self.assertRaises(lowerCamelCase_ ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""" ,cm.exception.args[0] )
def lowerCamelCase__ ( self ):
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(UpperCAmelCase_ ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowerCamelCase_ ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""" ,cm.exception.args[0] )
def lowerCamelCase__ ( self ):
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(lowerCamelCase_ ) as cm:
mock_training_loop_function(1_28 ,"""hello""" ,"""world""" )
self.assertIn("""Batch size was passed into `f`""" ,cm.exception.args[0] )
self.assertIn("""`f(arg1='hello', arg2='world')""" ,cm.exception.args[0] )
def lowerCamelCase__ ( self ):
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(UpperCAmelCase_ ):
raise ValueError("""Oops, we had an error!""" )
with self.assertRaises(lowerCamelCase_ ) as cm:
mock_training_loop_function()
self.assertIn("""Oops, we had an error!""" ,cm.exception.args[0] )
@require_cuda
def lowerCamelCase__ ( self ):
_lowercase : int = torch.cuda.memory_allocated()
_lowercase : Dict = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() ,lowerCamelCase_ )
_lowercase : Any = release_memory(lowerCamelCase_ )
self.assertEqual(torch.cuda.memory_allocated() ,lowerCamelCase_ )
| 361 |
"""simple docstring"""
import argparse
from collections import defaultdict
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : str = F"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
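# Count how many times this exact test has been patched so repeated entries overwrite successive occurrences.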
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : Any = f.readlines()
_lowercase : Optional[int] = F"""class {class_name}("""
_lowercase : List[str] = F"""{4 * " "}def {test_name}("""
_lowercase : List[Any] = F"""{8 * " "}{correct_line.split()[0]}"""
_lowercase : int = F"""{16 * " "}{correct_line.split()[0]}"""
_lowercase : str = False
_lowercase : Optional[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Any = False
_lowercase : int = 0
_lowercase : Tuple = 0
_lowercase : Union[str, Any] = []
for line in lines:
if line.startswith(__UpperCAmelCase ):
_lowercase : List[str] = True
elif in_class and line.startswith(__UpperCAmelCase ):
_lowercase : str = True
elif in_class and in_func and (line.startswith(__UpperCAmelCase ) or line.startswith(__UpperCAmelCase )):
_lowercase : Union[str, Any] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowercase : Optional[int] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowercase : Optional[Any] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"""{spaces * " "}{correct_line}""" )
_lowercase : Union[str, Any] = False
else:
new_lines.append(__UpperCAmelCase )
with open(__UpperCAmelCase , """w""" ) as f:
for line in new_lines:
f.write(__UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=None ):
if fail is not None:
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : Dict = {l.strip() for l in f.readlines()}
else:
_lowercase : int = None
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : int = f.readlines()
_lowercase : int = defaultdict(__UpperCAmelCase )
for line in correct_lines:
_lowercase , _lowercase , _lowercase , _lowercase : int = line.split(""";""" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase: List[Any] = argparse.ArgumentParser()
parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""")
parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None)
UpperCAmelCase: Any = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 336 | 0 |
"""simple docstring"""
from collections.abc import Sequence
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase = False ):
if not arr:
return 0
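# Kadane's algorithm: grow the running sum while it stays profitable, otherwise restart it, and track the best value seen.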
_lowercase : List[Any] = 0 if allow_empty_subarrays else float("""-inf""" )
_lowercase : Dict = 0.0
for num in arr:
_lowercase : List[Any] = max(0 if allow_empty_subarrays else num , curr_sum + num )
_lowercase : Optional[Any] = max(__lowerCAmelCase , __lowerCAmelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase: Any = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"""{max_subarray_sum(nums) = }""")
| 362 |
"""simple docstring"""
UpperCAmelCase: List[str] = """0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 336 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCamelCase__ ( self ):
_lowercase : Any = 1
_lowercase : str = 3
_lowercase : Any = (32, 32)
_lowercase : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(lowerCAmelCase__ )
return image
@property
def lowerCamelCase__ ( self ):
torch.manual_seed(0 )
_lowercase : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
return model
@property
def lowerCamelCase__ ( self ):
torch.manual_seed(0 )
_lowercase : Dict = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
def lowerCamelCase__ ( self ):
torch.manual_seed(0 )
_lowercase : int = RobertaSeriesConfig(
hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=50_06 ,)
return RobertaSeriesModelWithTransformation(lowerCAmelCase__ )
@property
def lowerCamelCase__ ( self ):
def extract(*UpperCAmelCase_ ,**UpperCAmelCase_ ):
class UpperCamelCase :
"""simple docstring"""
def __init__( self ):
_lowercase : Dict = torch.ones([0] )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
self.pixel_values.to(lowerCAmelCase__ )
return self
return Out()
return extract
def lowerCamelCase__ ( self ):
_lowercase : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowercase : Any = self.dummy_cond_unet
_lowercase : Any = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
_lowercase : str = self.dummy_vae
_lowercase : List[Any] = self.dummy_text_encoder
_lowercase : Union[str, Any] = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
_lowercase : List[str] = 77
_lowercase : List[str] = self.dummy_image.to(lowerCAmelCase__ )
_lowercase : int = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
_lowercase : str = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ ,scheduler=lowerCAmelCase__ ,vae=lowerCAmelCase__ ,text_encoder=lowerCAmelCase__ ,tokenizer=lowerCAmelCase__ ,safety_checker=lowerCAmelCase__ ,feature_extractor=self.dummy_extractor ,)
_lowercase : Optional[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=lowerCAmelCase__ )
_lowercase : Tuple = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_lowercase : Dict = "A painting of a squirrel eating a burger"
_lowercase : Union[str, Any] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_lowercase : List[str] = alt_pipe(
[prompt] ,generator=lowerCAmelCase__ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,image=lowerCAmelCase__ ,)
_lowercase : Tuple = output.images
_lowercase : Union[str, Any] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_lowercase : List[str] = alt_pipe(
[prompt] ,generator=lowerCAmelCase__ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="""np""" ,image=lowerCAmelCase__ ,return_dict=lowerCAmelCase__ ,)[0]
_lowercase : List[Any] = image[0, -3:, -3:, -1]
_lowercase : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
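# The pipeline runs twice with the same seed, once returning a dict and once a plain tuple; both slices must match the reference.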
assert image.shape == (1, 32, 32, 3)
_lowercase : List[Any] = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.dummy_cond_unet
_lowercase : Dict = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
_lowercase : List[Any] = self.dummy_vae
_lowercase : Tuple = self.dummy_text_encoder
_lowercase : str = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
_lowercase : List[Any] = 77
_lowercase : Tuple = self.dummy_image.to(lowerCAmelCase__ )
# put models in fp16
_lowercase : Optional[int] = unet.half()
_lowercase : int = vae.half()
_lowercase : Optional[Any] = bert.half()
# make sure here that pndm scheduler skips prk
_lowercase : Dict = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ ,scheduler=lowerCAmelCase__ ,vae=lowerCAmelCase__ ,text_encoder=lowerCAmelCase__ ,tokenizer=lowerCAmelCase__ ,safety_checker=lowerCAmelCase__ ,feature_extractor=self.dummy_extractor ,)
_lowercase : List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=lowerCAmelCase__ )
_lowercase : str = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_lowercase : List[Any] = "A painting of a squirrel eating a burger"
_lowercase : Optional[Any] = torch.manual_seed(0 )
_lowercase : Union[str, Any] = alt_pipe(
[prompt] ,generator=lowerCAmelCase__ ,num_inference_steps=2 ,output_type="""np""" ,image=lowerCAmelCase__ ,).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def lowerCamelCase__ ( self ):
_lowercase : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
_lowercase : str = init_image.resize((7_60, 5_04) )
_lowercase : Dict = "BAAI/AltDiffusion"
_lowercase : List[str] = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ ,safety_checker=lowerCAmelCase__ ,)
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
_lowercase : str = "A fantasy landscape, trending on artstation"
_lowercase : str = torch.manual_seed(0 )
_lowercase : Any = pipe(
prompt=lowerCAmelCase__ ,image=lowerCAmelCase__ ,strength=0.75 ,guidance_scale=7.5 ,generator=lowerCAmelCase__ ,output_type="""np""" ,)
_lowercase : Optional[Any] = output.images[0]
_lowercase : List[str] = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
_lowercase : Dict = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self ):
_lowercase : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_lowercase : Optional[int] = init_image.resize((7_68, 5_12) )
_lowercase : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
_lowercase : List[str] = "BAAI/AltDiffusion"
_lowercase : List[str] = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ ,safety_checker=lowerCAmelCase__ ,)
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
_lowercase : Union[str, Any] = "A fantasy landscape, trending on artstation"
_lowercase : Optional[Any] = torch.manual_seed(0 )
_lowercase : str = pipe(
prompt=lowerCAmelCase__ ,image=lowerCAmelCase__ ,strength=0.75 ,guidance_scale=7.5 ,generator=lowerCAmelCase__ ,output_type="""np""" ,)
_lowercase : str = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so a loose elementwise tolerance is used here
assert np.abs(expected_image - image ).max() < 1E-2
| 363 |
"""simple docstring"""
UpperCAmelCase: str = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
UpperCAmelCase: Any = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCAmelCase: int = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 336 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase: Dict = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: List[Any] = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: List[Any] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: Optional[Any] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase: Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 364 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
UpperCAmelCase: List[Any] = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
def constraint_to_multiple_of(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=0 , __UpperCAmelCase=None ):
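# Round val to the nearest multiple of `multiple`, then floor/ceil as needed so the result stays within [min_val, max_val].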
_lowercase : Union[str, Any] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
_lowercase : str = math.floor(val / multiple ) * multiple
if x < min_val:
_lowercase : Dict = math.ceil(val / multiple ) * multiple
return x
_lowercase : List[str] = (output_size, output_size) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else output_size
_lowercase , _lowercase : List[Any] = get_image_size(__UpperCAmelCase )
_lowercase , _lowercase : Union[str, Any] = output_size
# determine new height and width
_lowercase : str = output_height / input_height
_lowercase : List[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
_lowercase : str = scale_width
else:
# fit height
_lowercase : int = scale_height
_lowercase : List[Any] = constraint_to_multiple_of(scale_height * input_height , multiple=__UpperCAmelCase )
_lowercase : Optional[Any] = constraint_to_multiple_of(scale_width * input_width , multiple=__UpperCAmelCase )
return (new_height, new_width)
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["pixel_values"]
def __init__( self ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = PILImageResampling.BILINEAR ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = 1 / 2_55 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ )
_lowercase : List[Any] = size if size is not None else {"""height""": 3_84, """width""": 3_84}
_lowercase : str = get_size_dict(UpperCAmelCase_ )
_lowercase : Tuple = do_resize
_lowercase : Any = size
_lowercase : List[Any] = keep_aspect_ratio
_lowercase : Any = ensure_multiple_of
_lowercase : str = resample
_lowercase : Optional[Any] = do_rescale
_lowercase : List[Any] = rescale_factor
_lowercase : Union[str, Any] = do_normalize
_lowercase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowercase : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = PILImageResampling.BICUBIC ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
_lowercase : Optional[Any] = get_size_dict(UpperCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_lowercase : Dict = get_resize_output_image_size(
UpperCAmelCase_ ,output_size=(size["""height"""], size["""width"""]) ,keep_aspect_ratio=UpperCAmelCase_ ,multiple=UpperCAmelCase_ ,)
return resize(UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
return rescale(UpperCAmelCase_ ,scale=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
return normalize(UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = ChannelDimension.FIRST ,**UpperCAmelCase_ ,):
_lowercase : Any = do_resize if do_resize is not None else self.do_resize
_lowercase : List[str] = size if size is not None else self.size
_lowercase : int = get_size_dict(UpperCAmelCase_ )
_lowercase : Any = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_lowercase : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_lowercase : List[str] = resample if resample is not None else self.resample
_lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
_lowercase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase : str = do_normalize if do_normalize is not None else self.do_normalize
_lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean
_lowercase : int = image_std if image_std is not None else self.image_std
_lowercase : Union[str, Any] = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_lowercase : int = [to_numpy_array(UpperCAmelCase_ ) for image in images]
if do_resize:
_lowercase : Union[str, Any] = [self.resize(image=UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ) for image in images]
if do_rescale:
_lowercase : int = [self.rescale(image=UpperCAmelCase_ ,scale=UpperCAmelCase_ ) for image in images]
if do_normalize:
_lowercase : str = [self.normalize(image=UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ) for image in images]
_lowercase : Tuple = [to_channel_dimension_format(UpperCAmelCase_ ,UpperCAmelCase_ ) for image in images]
_lowercase : int = {"""pixel_values""": images}
return BatchFeature(data=UpperCAmelCase_ ,tensor_type=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ):
_lowercase : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
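# Each logit map is resized bilinearly to its requested target size before taking the per-pixel argmax.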
if target_sizes is not None:
if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(UpperCAmelCase_ ):
_lowercase : Tuple = target_sizes.numpy()
_lowercase : Optional[Any] = []
for idx in range(len(UpperCAmelCase_ ) ):
_lowercase : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode="""bilinear""" ,align_corners=UpperCAmelCase_ )
_lowercase : Optional[int] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCAmelCase_ )
else:
_lowercase : Union[str, Any] = logits.argmax(dim=1 )
_lowercase : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 336 | 0 |
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
UpperCAmelCase: Optional[int] = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True ):
if model_type not in MODEL_CLASSES:
raise ValueError(F"""Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.""" )
_lowercase , _lowercase , _lowercase , _lowercase : int = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
_lowercase : Dict = cached_file(snake_case__ , snake_case__ , force_download=not use_cached_models )
_lowercase : str = config_class.from_json_file(snake_case__ )
_lowercase : List[str] = True
_lowercase : Tuple = True
print(F"""Building TensorFlow model from configuration: {config}""" )
_lowercase : List[Any] = model_class(snake_case__ )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
_lowercase : List[str] = cached_file(
snake_case__ , snake_case__ , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
_lowercase : List[Any] = load_pytorch_checkpoint_in_tfa_model(snake_case__ , snake_case__ )
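# Optional sanity check: run the dummy inputs through both the TF and PyTorch models and compare their raw outputs.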
if compare_with_pt_model:
_lowercase : int = tf_model(tf_model.dummy_inputs , training=snake_case__ ) # build the network
_lowercase : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" )
_lowercase : List[Any] = pt_model_class.from_pretrained(
pretrained_model_name_or_path=snake_case__ , config=snake_case__ , state_dict=snake_case__ )
with torch.no_grad():
_lowercase : int = pt_model(**pt_model.dummy_inputs )
_lowercase : int = pto[0].numpy()
_lowercase : int = tfo[0].numpy()
_lowercase : Optional[int] = np.amax(np.abs(np_pt - np_tf ) )
print(F"""Max absolute difference between models outputs {diff}""" )
assert diff <= 2E-2, F"""Error, model absolute difference is >2e-2: {diff}"""
# Save pytorch-model
print(F"""Save TensorFlow model to {tf_dump_path}""" )
tf_model.save_weights(snake_case__ , save_format="""h5""" )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , ):
if args_model_type is None:
_lowercase : str = list(MODEL_CLASSES.keys() )
else:
_lowercase : str = [args_model_type]
for j, model_type in enumerate(snake_case__ , start=1 ):
print("""=""" * 100 )
print(F""" Converting model type {j}/{len(snake_case__ )}: {model_type}""" )
print("""=""" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(F"""Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.""" )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : List[Any] = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
_lowercase : Any = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
_lowercase : Union[str, Any] = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(snake_case__ , snake_case__ ) , start=1 ):
print("""-""" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F""" Skipping finetuned checkpoint {model_shortcut_name}""" )
continue
_lowercase : Optional[Any] = model_shortcut_name
elif only_convert_finetuned_models:
print(F""" Skipping not finetuned checkpoint {model_shortcut_name}""" )
continue
print(
F""" Converting checkpoint {i}/{len(snake_case__ )}: {model_shortcut_name} - model_type {model_type}""" )
print("""-""" * 100 )
if config_shortcut_name in aws_config_map:
_lowercase : Tuple = cached_file(snake_case__ , snake_case__ , force_download=not use_cached_models )
else:
_lowercase : Optional[int] = config_shortcut_name
if model_shortcut_name in aws_model_maps:
_lowercase : List[Any] = cached_file(snake_case__ , snake_case__ , force_download=not use_cached_models )
else:
_lowercase : Optional[Any] = model_shortcut_name
if os.path.isfile(snake_case__ ):
_lowercase : Tuple = """converted_model"""
convert_pt_checkpoint_to_tf(
model_type=snake_case__ , pytorch_checkpoint_path=snake_case__ , config_file=snake_case__ , tf_dump_path=os.path.join(snake_case__ , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=snake_case__ , )
if remove_cached_files:
os.remove(snake_case__ )
os.remove(snake_case__ )
if __name__ == "__main__":
UpperCAmelCase: List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
F'Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
UpperCAmelCase: List[str] = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 365 |
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create the universe of discourse using np.linspace()
UpperCAmelCase: Optional[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
UpperCAmelCase: Tuple = [0, 25, 50]
UpperCAmelCase: List[Any] = [25, 50, 75]
UpperCAmelCase: Optional[int] = fuzz.membership.trimf(X, abca)
UpperCAmelCase: Any = fuzz.membership.trimf(X, abca)
# Compute the different operations using the built-in skfuzzy functions.
UpperCAmelCase: List[Any] = np.ones(75)
UpperCAmelCase: Any = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
UpperCAmelCase: str = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
UpperCAmelCase: Optional[int] = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = 1 - µA(x)
UpperCAmelCase: List[Any] = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
UpperCAmelCase: Optional[int] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = µA(x) + µB(x) - (µA(x) * µB(x))
UpperCAmelCase: int = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
UpperCAmelCase: int = young * middle_aged
# 7. Bounded Sum = min[1, µA(x) + µB(x)]
UpperCAmelCase: List[Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = max[0, µA(x) - µB(x)]
UpperCAmelCase: int = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 336 | 0 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
UpperCAmelCase: List[Any] = 'docs/source/en/_toctree.yml'
def a ( __UpperCAmelCase ):
_lowercase : Optional[int] = defaultdict(lowerCAmelCase__ )
_lowercase : Tuple = []
_lowercase : List[Any] = []
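# First pass: count each 'local' key and set aside any 'Overview' entries so they can be pinned to the front later.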
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} )
else:
new_doc_list.append(lowerCAmelCase__ )
_lowercase : Tuple = new_doc_list
_lowercase : Tuple = [key for key, value in counts.items() if value > 1]
_lowercase : Tuple = []
for duplicate_key in duplicates:
_lowercase : Tuple = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} )
if len(lowerCAmelCase__ ) > 1:
raise ValueError(
F"""{duplicate_key} is present several times in the documentation table of content at """
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] )
_lowercase : Optional[int] = sorted(lowerCAmelCase__ , key=lambda __UpperCAmelCase : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(lowerCAmelCase__ ) > 1:
raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" )
overview_doc.extend(lowerCAmelCase__ )
# Sort
return overview_doc
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the model doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc
    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this.")
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the model doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs
    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this.")
if __name__ == "__main__":
UpperCAmelCase: List[str] = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
UpperCAmelCase: Tuple = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
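For intuition, here is a toy run of the `clean_doc_toc` helper above; the entries are hypothetical and no YAML file is needed:
docs = [
    {"local": "overview", "title": "Overview"},
    {"local": "zeta", "title": "Zeta"},
    {"local": "alpha", "title": "Alpha"},
    {"local": "alpha", "title": "Alpha"},  # duplicate key with a single title
]
# The overview entry stays first, the duplicate "alpha" collapses to one
# entry, and the rest is sorted by title: overview, alpha, zeta.
print(clean_doc_toc(docs))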
| 366 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ):
_lowercase : str = tempfile.mkdtemp()
# fmt: off
_lowercase : List[Any] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_lowercase : Optional[int] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) )
_lowercase : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
_lowercase : Optional[int] = {"""unk_token""": """<unk>"""}
_lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
_lowercase : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
_lowercase : Dict = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
}
_lowercase : List[Any] = os.path.join(self.tmpdirname ,UpperCAmelCase_ )
with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp:
json.dump(UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]
_lowercase : Tuple = [Image.fromarray(np.moveaxis(UpperCAmelCase_ ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.get_tokenizer()
_lowercase : List[Any] = self.get_rust_tokenizer()
_lowercase : List[Any] = self.get_image_processor()
_lowercase : List[str] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
_lowercase : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=UpperCAmelCase_ )
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
_lowercase : List[str] = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,UpperCAmelCase_ )
self.assertIsInstance(processor_fast.tokenizer ,UpperCAmelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,UpperCAmelCase_ )
self.assertIsInstance(processor_fast.image_processor ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : str = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowercase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
_lowercase : Optional[int] = self.get_image_processor(do_normalize=UpperCAmelCase_ ,padding_value=1.0 )
_lowercase : int = CLIPProcessor.from_pretrained(
self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=UpperCAmelCase_ ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,UpperCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : Optional[int] = self.get_tokenizer()
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : int = self.prepare_image_inputs()
_lowercase : str = image_processor(UpperCAmelCase_ ,return_tensors="""np""" )
_lowercase : int = processor(images=UpperCAmelCase_ ,return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : Optional[Any] = self.get_tokenizer()
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : List[Any] = """lower newer"""
_lowercase : Any = processor(text=UpperCAmelCase_ )
_lowercase : Union[str, Any] = tokenizer(UpperCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.get_image_processor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : str = """lower newer"""
_lowercase : List[Any] = self.prepare_image_inputs()
_lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def lowerCamelCase__ ( self ):
_lowercase : Dict = self.get_image_processor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowercase : int = processor.batch_decode(UpperCAmelCase_ )
_lowercase : Tuple = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : Optional[Any] = """lower newer"""
_lowercase : Any = self.prepare_image_inputs()
_lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
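Outside the test harness, the round trip these assertions exercise reduces to a few lines. This is a sketch, assuming the public `openai/clip-vit-base-patch32` checkpoint can be downloaded:
import numpy as np
from PIL import Image
from transformers import CLIPProcessor

clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
batch = clip_processor(text=["a photo of a cat"], images=image, return_tensors="pt")
print(sorted(batch.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']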
| 336 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase :
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=13 ,UpperCAmelCase_=30 ,UpperCAmelCase_=2 ,UpperCAmelCase_=3 ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_=32 ,UpperCAmelCase_=5 ,UpperCAmelCase_=4 ,UpperCAmelCase_=37 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=10 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=None ,UpperCAmelCase_=2 ,):
_lowercase : Any = parent
_lowercase : Optional[int] = batch_size
_lowercase : Union[str, Any] = image_size
_lowercase : Tuple = patch_size
_lowercase : Optional[int] = num_channels
_lowercase : Any = is_training
_lowercase : List[str] = use_labels
_lowercase : str = hidden_size
_lowercase : Optional[Any] = num_hidden_layers
_lowercase : List[str] = num_attention_heads
_lowercase : List[Any] = intermediate_size
_lowercase : Dict = hidden_act
_lowercase : int = hidden_dropout_prob
_lowercase : Dict = attention_probs_dropout_prob
_lowercase : List[str] = type_sequence_label_size
_lowercase : Optional[Any] = initializer_range
_lowercase : Dict = scope
_lowercase : List[str] = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowercase : Optional[Any] = (image_size // patch_size) ** 2
_lowercase : Tuple = num_patches + 1
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase : Optional[int] = None
if self.use_labels:
_lowercase : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowercase : List[str] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self ):
return ViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=UpperCamelCase__ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Dict = ViTModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_lowercase : str = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Optional[Any] = ViTForMaskedImageModeling(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_lowercase : Tuple = model(UpperCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowercase : Tuple = 1
_lowercase : List[Any] = ViTForMaskedImageModeling(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_lowercase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowercase : List[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Dict = self.type_sequence_label_size
_lowercase : str = ViTForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_lowercase : Any = model(UpperCamelCase__ ,labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowercase : Dict = 1
_lowercase : List[str] = ViTForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_lowercase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowercase : Optional[int] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase__ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ : Dict = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Optional[int] = True
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = False
def lowerCamelCase__ ( self ):
_lowercase : List[Any] = ViTModelTester(self )
_lowercase : Any = ConfigTester(self ,config_class=UpperCamelCase__ ,has_text_modality=UpperCamelCase__ ,hidden_size=37 )
def lowerCamelCase__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def lowerCamelCase__ ( self ):
pass
def lowerCamelCase__ ( self ):
_lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : List[str] = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_lowercase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ ,nn.Linear ) )
def lowerCamelCase__ ( self ):
_lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : str = model_class(UpperCamelCase__ )
_lowercase : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : Union[str, Any] = [*signature.parameters.keys()]
_lowercase : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,UpperCamelCase__ )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase__ ( self ):
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ )
def lowerCamelCase__ ( self ):
_lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def lowerCamelCase__ ( self ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : str = ViTModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def __SCREAMING_SNAKE_CASE ( ):
_lowercase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase__ ( self ):
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self ):
_lowercase : Dict = ViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" ).to(UpperCamelCase__ )
_lowercase : Optional[int] = self.default_image_processor
_lowercase : List[str] = prepare_img()
_lowercase : Union[str, Any] = image_processor(images=UpperCamelCase__ ,return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
_lowercase : str = model(**UpperCamelCase__ )
# verify the logits
_lowercase : Dict = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape ,UpperCamelCase__ )
_lowercase : List[str] = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,UpperCamelCase__ ,atol=1E-4 ) )
@slow
def lowerCamelCase__ ( self ):
_lowercase : str = ViTModel.from_pretrained("""facebook/dino-vits8""" ).to(UpperCamelCase__ )
_lowercase : Dict = ViTImageProcessor.from_pretrained("""facebook/dino-vits8""" ,size=4_80 )
_lowercase : str = prepare_img()
_lowercase : Optional[Any] = image_processor(images=UpperCamelCase__ ,return_tensors="""pt""" )
_lowercase : Tuple = inputs.pixel_values.to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
_lowercase : Tuple = model(UpperCamelCase__ ,interpolate_pos_encoding=UpperCamelCase__ )
# verify the logits
_lowercase : List[Any] = torch.Size((1, 36_01, 3_84) )
self.assertEqual(outputs.last_hidden_state.shape ,UpperCamelCase__ )
_lowercase : Any = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,UpperCamelCase__ ,atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def lowerCamelCase__ ( self ):
_lowercase : List[Any] = ViTModel.from_pretrained("""facebook/dino-vits8""" ,torch_dtype=torch.floataa ,device_map="""auto""" )
_lowercase : List[str] = self.default_image_processor
_lowercase : Optional[Any] = prepare_img()
_lowercase : Dict = image_processor(images=UpperCamelCase__ ,return_tensors="""pt""" )
_lowercase : Tuple = inputs.pixel_values.to(UpperCamelCase__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_lowercase : Dict = model(UpperCamelCase__ )
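For reference, the classification path these tests cover looks like this in plain user code. This is a sketch, assuming the `google/vit-base-patch16-224` checkpoint is reachable:
import numpy as np
import torch
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

vit_processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
vit_model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = vit_processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = vit_model(**inputs).logits
print(vit_model.config.id2label[int(logits.argmax(-1))])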
| 367 |
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
UpperCAmelCase: str = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class UpperCamelCase ( datasets.BuilderConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[datasets.Features] = None
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , ):
import pyspark
def generate_fn():
_lowercase : List[Any] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
_lowercase : Optional[int] = df_with_partition_id.select("""*""" ).where(F"""part_id = {partition_id}""" ).drop("""part_id""" )
_lowercase : int = partition_df.collect()
_lowercase : Dict = 0
for row in rows:
yield F"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class UpperCamelCase ( _BaseExamplesIterable ):
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ,):
_lowercase : Union[str, Any] = df
_lowercase : List[str] = partition_order or range(self.df.rdd.getNumPartitions() )
_lowercase : Tuple = _generate_iterable_examples(self.df ,self.partition_order )
def __iter__( self ):
yield from self.generate_examples_fn()
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : List[str] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(UpperCAmelCase_ )
return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Union[str, Any] = self.split_shard_indices_by_worker(UpperCAmelCase_ ,UpperCAmelCase_ )
return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ )
@property
def lowerCamelCase__ ( self ):
return len(self.partition_order )
class UpperCamelCase ( datasets.DatasetBuilder ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = SparkConfig
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
import pyspark
_lowercase : List[Any] = pyspark.sql.SparkSession.builder.getOrCreate()
_lowercase : List[Any] = df
_lowercase : int = working_dir
super().__init__(
cache_dir=UpperCAmelCase_ ,config_name=str(self.df.semanticHash() ) ,**UpperCAmelCase_ ,)
def lowerCamelCase__ ( self ):
# Returns the path of the created file.
def create_cache_and_write_probe(UpperCAmelCase_ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir ,exist_ok=UpperCAmelCase_ )
_lowercase : Union[str, Any] = os.path.join(self._cache_dir ,"""fs_test""" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(UpperCAmelCase_ ,"""a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" ,"""""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
_lowercase : List[str] = (
self._spark.sparkContext.parallelize(range(1 ) ,1 ).mapPartitions(UpperCAmelCase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def lowerCamelCase__ ( self ):
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
import pyspark
def get_arrow_batch_size(UpperCAmelCase_ ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
_lowercase : List[str] = self.df.count()
_lowercase : List[str] = df_num_rows if df_num_rows <= 1_00 else 1_00
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
_lowercase : Union[str, Any] = (
self.df.limit(UpperCAmelCase_ )
.repartition(1 )
.mapInArrow(UpperCAmelCase_ ,"""batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
_lowercase : List[Any] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
_lowercase : int = min(UpperCAmelCase_ ,int(approx_total_size / max_shard_size ) )
_lowercase : List[Any] = self.df.repartition(UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
import pyspark
_lowercase : Union[str, Any] = ParquetWriter if file_format == """parquet""" else ArrowWriter
_lowercase : List[Any] = os.path.join(self._working_dir ,os.path.basename(UpperCAmelCase_ ) ) if self._working_dir else fpath
_lowercase : Any = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
_lowercase : Union[str, Any] = self.config.features
_lowercase : Optional[int] = self._writer_batch_size
_lowercase : Optional[Any] = self._fs.storage_options
def write_arrow(UpperCAmelCase_ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
_lowercase : Any = pyspark.TaskContext().taskAttemptId()
_lowercase : List[str] = next(UpperCAmelCase_ ,UpperCAmelCase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
_lowercase : List[Any] = 0
_lowercase : int = writer_class(
features=UpperCAmelCase_ ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,)
_lowercase : Optional[int] = pa.Table.from_batches([first_batch] )
writer.write_table(UpperCAmelCase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
_lowercase , _lowercase : Optional[Any] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
shard_id += 1
_lowercase : Union[str, Any] = writer_class(
features=writer._features ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,)
_lowercase : Dict = pa.Table.from_batches([batch] )
writer.write_table(UpperCAmelCase_ )
if writer._num_bytes > 0:
_lowercase , _lowercase : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(UpperCAmelCase_ ) ):
_lowercase : Dict = os.path.join(os.path.dirname(UpperCAmelCase_ ) ,os.path.basename(UpperCAmelCase_ ) )
shutil.move(UpperCAmelCase_ ,UpperCAmelCase_ )
_lowercase : List[str] = (
self.df.mapInArrow(UpperCAmelCase_ ,"""task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) ,pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) ,pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) ,pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) ,)
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = "arrow" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
self._validate_cache_dir()
_lowercase : Tuple = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(UpperCAmelCase_ )
_lowercase : Optional[int] = not is_remote_filesystem(self._fs )
_lowercase : Dict = os.path.join if is_local else posixpath.join
_lowercase : int = """-TTTTT-SSSSS-of-NNNNN"""
_lowercase : Optional[Any] = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
_lowercase : Dict = path_join(self._output_dir ,UpperCAmelCase_ )
_lowercase : List[Any] = 0
_lowercase : Optional[Any] = 0
_lowercase : int = 0
_lowercase : Any = []
_lowercase : Any = []
for task_id, content in self._prepare_split_single(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(UpperCAmelCase_ )
_lowercase : Optional[int] = total_num_examples
_lowercase : List[Any] = total_num_bytes
# should rename everything at the end
logger.debug(f"""Renaming {total_shards} shards.""" )
if total_shards > 1:
_lowercase : List[Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
_lowercase : Union[str, Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
rename(
UpperCAmelCase_ ,fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace("""TTTTT-SSSSS""" ,f"""{global_shard_id:05d}""" ).replace("""NNNNN""" ,f"""{total_shards:05d}""" ) ,)
_lowercase : Optional[Any] = []
_lowercase : List[str] = 0
for i in range(len(UpperCAmelCase_ ) ):
_lowercase , _lowercase : List[str] = task_id_and_num_shards[i]
for shard_id in range(UpperCAmelCase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(UpperCAmelCase_ ,len(UpperCAmelCase_ ) ).map(lambda UpperCAmelCase_ : _rename_shard(*UpperCAmelCase_ ) ).collect()
else:
# don't use any pattern
_lowercase : Tuple = 0
_lowercase : Optional[Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace(UpperCAmelCase_ ,"""""" ) ,)
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,):
return SparkExamplesIterable(self.df )
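This builder backs `Dataset.from_spark`. A minimal local sketch, assuming `pyspark` is installed and the `datasets` release in use ships the Spark integration (the column names are illustrative):
from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([("hello", 0), ("world", 1)], ["text", "label"])
ds = Dataset.from_spark(df)
print(ds[0])  # {'text': 'hello', 'label': 0}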
| 336 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
UpperCAmelCase: int = logging.get_logger(__name__)
UpperCAmelCase: List[str] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCAmelCase: str = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
UpperCAmelCase: Tuple = {
"facebook/bart-base": 1_024,
"facebook/bart-large": 1_024,
"facebook/bart-large-mnli": 1_024,
"facebook/bart-large-cnn": 1_024,
"facebook/bart-large-xsum": 1_024,
"yjernite/bart_eli5": 1_024,
}
class UpperCamelCase ( _a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : List[Any] = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : Any = BartTokenizer
def __init__( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_="replace" ,UpperCAmelCase_="<s>" ,UpperCAmelCase_="</s>" ,UpperCAmelCase_="</s>" ,UpperCAmelCase_="<s>" ,UpperCAmelCase_="<unk>" ,UpperCAmelCase_="<pad>" ,UpperCAmelCase_="<mask>" ,UpperCAmelCase_=False ,UpperCAmelCase_=True ,**UpperCAmelCase_ ,):
super().__init__(
UpperCAmelCase_ ,UpperCAmelCase_ ,tokenizer_file=UpperCAmelCase_ ,errors=UpperCAmelCase_ ,bos_token=UpperCAmelCase_ ,eos_token=UpperCAmelCase_ ,sep_token=UpperCAmelCase_ ,cls_token=UpperCAmelCase_ ,unk_token=UpperCAmelCase_ ,pad_token=UpperCAmelCase_ ,mask_token=UpperCAmelCase_ ,add_prefix_space=UpperCAmelCase_ ,trim_offsets=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
_lowercase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" ,UpperCAmelCase_ ) != add_prefix_space:
_lowercase : Union[str, Any] = getattr(UpperCAmelCase_ ,pre_tok_state.pop("""type""" ) )
_lowercase : Tuple = add_prefix_space
_lowercase : Union[str, Any] = pre_tok_class(**UpperCAmelCase_ )
_lowercase : List[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowercase : Tuple = """post_processor"""
_lowercase : Union[str, Any] = getattr(self.backend_tokenizer ,UpperCAmelCase_ ,UpperCAmelCase_ )
if tokenizer_component_instance:
_lowercase : int = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowercase : Optional[Any] = tuple(state["""sep"""] )
if "cls" in state:
_lowercase : str = tuple(state["""cls"""] )
_lowercase : Tuple = False
if state.get("""add_prefix_space""" ,UpperCAmelCase_ ) != add_prefix_space:
_lowercase : Optional[int] = add_prefix_space
_lowercase : Optional[Any] = True
if state.get("""trim_offsets""" ,UpperCAmelCase_ ) != trim_offsets:
_lowercase : List[str] = trim_offsets
_lowercase : List[str] = True
if changes_to_apply:
_lowercase : Optional[int] = getattr(UpperCAmelCase_ ,state.pop("""type""" ) )
_lowercase : Optional[Any] = component_class(**UpperCAmelCase_ )
setattr(self.backend_tokenizer ,UpperCAmelCase_ ,UpperCAmelCase_ )
@property
def lowerCamelCase__ ( self ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : Union[str, Any] = AddedToken(UpperCAmelCase_ ,lstrip=UpperCAmelCase_ ,rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) else value
_lowercase : Union[str, Any] = value
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
_lowercase : str = kwargs.get("""is_split_into_words""" ,UpperCAmelCase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
_lowercase : Any = kwargs.get("""is_split_into_words""" ,UpperCAmelCase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ):
_lowercase : Union[str, Any] = self._tokenizer.model.save(UpperCAmelCase_ ,name=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ):
_lowercase : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ):
_lowercase : Dict = [self.sep_token_id]
_lowercase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
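The special-token layout implemented by `build_inputs_with_special_tokens` above can be checked directly. A sketch, assuming `facebook/bart-base` is reachable:
from transformers import BartTokenizerFast

tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
ids = tok.encode("hello world", add_special_tokens=False)
single = tok.build_inputs_with_special_tokens(ids)
assert single[0] == tok.bos_token_id and single[-1] == tok.eos_token_id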
| 368 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase: Optional[int] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( snake_case , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = XLNetTokenizer
SCREAMING_SNAKE_CASE_ : Dict = XLNetTokenizerFast
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
def lowerCamelCase__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
_lowercase : Dict = XLNetTokenizer(UpperCAmelCase_ ,keep_accents=UpperCAmelCase_ )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = """<s>"""
_lowercase : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) ,UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<unk>""" )
self.assertEqual(vocab_keys[1] ,"""<s>""" )
self.assertEqual(vocab_keys[-1] ,"""<eod>""" )
self.assertEqual(len(UpperCAmelCase_ ) ,10_06 )
def lowerCamelCase__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size ,10_00 )
def lowerCamelCase__ ( self ):
_lowercase : int = XLNetTokenizer(UpperCAmelCase_ ,keep_accents=UpperCAmelCase_ )
_lowercase : Union[str, Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCAmelCase_ ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) ,[2_85, 46, 10, 1_70, 3_82] )
_lowercase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
_lowercase : List[Any] = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ ,[8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
_lowercase : Optional[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
def lowerCamelCase__ ( self ):
_lowercase : Dict = XLNetTokenizer(UpperCAmelCase_ ,do_lower_case=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""▁he""", """ll""", """o"""] )
def lowerCamelCase__ ( self ):
_lowercase : int = XLNetTokenizer(UpperCAmelCase_ ,do_lower_case=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
@slow
def lowerCamelCase__ ( self ):
_lowercase : Dict = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
_lowercase : int = tokenizer.encode("""sequence builders""" ,add_special_tokens=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=UpperCAmelCase_ )
_lowercase : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
_lowercase : Dict = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ,UpperCAmelCase_ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def lowerCamelCase__ ( self ):
# fmt: off
_lowercase : Union[str, Any] = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ ,model_name="""xlnet-base-cased""" ,revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" ,)
| 336 | 0 |
"""simple docstring"""
class UpperCamelCase : # Public class to implement a graph
"""simple docstring"""
    def __init__( self ,row ,col ,graph ):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe( self ,i ,j ,visited ):
        # A cell can be visited when it lies inside the grid, is land (1) and unvisited
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs( self ,i ,j ,visited ):  # DFS over the 8 neighbouring cells
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands( self ):  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
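A quick usage check of the class above; the grid is arbitrary, and the expected count follows from 8-directional connectivity:
graph = [
    [1, 1, 0, 0, 0],
    [0, 1, 0, 0, 1],
    [1, 0, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
]
g = UpperCamelCase(5, 5, graph)
print(g.count_islands())  # 5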
| 369 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( input_str ):
    """
    Reverse the order of words in a sentence.

    >>> __SCREAMING_SNAKE_CASE("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 | 0 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCAmelCase: Dict = logging.get_logger(__name__)
UpperCAmelCase: str = Dict[str, Any]
UpperCAmelCase: Union[str, Any] = List[Prediction]
@add_end_docstrings(_lowerCAmelCase )
class UpperCamelCase ( _lowerCAmelCase ):
"""simple docstring"""
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
super().__init__(*_lowercase ,**_lowercase )
if self.framework == "tf":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self ,"""vision""" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
_lowercase : str = {}
if "threshold" in kwargs:
_lowercase : Tuple = kwargs["""threshold"""]
return {}, {}, postprocess_kwargs
def __call__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return super().__call__(*_lowercase ,**_lowercase )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : Tuple = load_image(_lowercase )
_lowercase : List[str] = torch.IntTensor([[image.height, image.width]] )
_lowercase : str = self.image_processor(images=[image] ,return_tensors="""pt""" )
if self.tokenizer is not None:
_lowercase : Union[str, Any] = self.tokenizer(text=inputs["""words"""] ,boxes=inputs["""boxes"""] ,return_tensors="""pt""" )
_lowercase : List[Any] = target_size
return inputs
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : Optional[Any] = model_inputs.pop("""target_size""" )
_lowercase : int = self.model(**_lowercase )
_lowercase : Tuple = outputs.__class__({"""target_size""": target_size, **outputs} )
if self.tokenizer is not None:
_lowercase : Optional[Any] = model_inputs["""bbox"""]
return model_outputs
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=0.9 ):
_lowercase : Optional[int] = model_outputs["""target_size"""]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
_lowercase , _lowercase : Any = target_size[0].tolist()
def unnormalize(UpperCAmelCase_ ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 10_00),
(height * bbox[1] / 10_00),
(width * bbox[2] / 10_00),
(height * bbox[3] / 10_00),
] ) )
_lowercase , _lowercase : Optional[Any] = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
_lowercase : List[Any] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
_lowercase : Union[str, Any] = [unnormalize(_lowercase ) for bbox in model_outputs["""bbox"""].squeeze(0 )]
_lowercase : str = ["""score""", """label""", """box"""]
_lowercase : Optional[int] = [dict(zip(_lowercase ,_lowercase ) ) for vals in zip(scores.tolist() ,_lowercase ,_lowercase ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
_lowercase : Tuple = self.image_processor.post_process_object_detection(_lowercase ,_lowercase ,_lowercase )
_lowercase : List[str] = raw_annotations[0]
_lowercase : Any = raw_annotation["""scores"""]
_lowercase : Optional[Any] = raw_annotation["""labels"""]
_lowercase : Any = raw_annotation["""boxes"""]
_lowercase : Optional[Any] = scores.tolist()
_lowercase : Any = [self.model.config.idalabel[label.item()] for label in labels]
_lowercase : Tuple = [self._get_bounding_box(_lowercase ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
_lowercase : str = ["""score""", """label""", """box"""]
_lowercase : int = [
dict(zip(_lowercase ,_lowercase ) )
for vals in zip(raw_annotation["""scores"""] ,raw_annotation["""labels"""] ,raw_annotation["""boxes"""] )
]
return annotation
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
if self.framework != "pt":
raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" )
_lowercase , _lowercase , _lowercase , _lowercase : Optional[int] = box.int().tolist()
_lowercase : List[Any] = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
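End to end, the pipeline above is typically used like this. A sketch, assuming the `facebook/detr-resnet-50` checkpoint and the sample COCO image are reachable:
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
preds = detector("http://images.cocodataset.org/val2017/000000039769.jpg")
print(preds[0])  # e.g. {'score': 0.99, 'label': 'cat', 'box': {...}}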
| 370 |
"""simple docstring"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines):
    filtered_lines = []
    for line in lines:
        line = re.sub(R"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)
    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
UpperCAmelCase: Tuple = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
UpperCAmelCase: List[str] = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
UpperCAmelCase: Any = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
UpperCAmelCase: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 336 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase: Optional[int] = logging.get_logger(__name__)
UpperCAmelCase: Optional[Any] = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class UpperCamelCase ( PretrainedConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = """visual_bert"""
    def __init__( self ,vocab_size=3_05_22 ,hidden_size=7_68 ,visual_embedding_dim=5_12 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=30_72 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=5_12 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1E-12 ,bypass_transformer=False ,special_visual_initialize=True ,pad_token_id=1 ,bos_token_id=0 ,eos_token_id=2 ,**kwargs ,):
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
_lowercase : List[Any] = vocab_size
_lowercase : Optional[Any] = max_position_embeddings
_lowercase : Dict = hidden_size
_lowercase : str = visual_embedding_dim
_lowercase : List[str] = num_hidden_layers
_lowercase : Union[str, Any] = num_attention_heads
_lowercase : Optional[int] = intermediate_size
_lowercase : Optional[Any] = hidden_act
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : str = attention_probs_dropout_prob
_lowercase : Union[str, Any] = initializer_range
_lowercase : Tuple = type_vocab_size
_lowercase : Tuple = layer_norm_eps
_lowercase : str = bypass_transformer
_lowercase : Optional[int] = special_visual_initialize
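# Hedged usage sketch (added): the released `transformers` library exposes this
# configuration as `VisualBertConfig`; a custom visual embedding size would be
# set like this (`visual_embedding_dim` is the released parameter name).
# from transformers import VisualBertConfig, VisualBertModel
# config = VisualBertConfig(visual_embedding_dim=1024)
# model = VisualBertModel(config)  # randomly initialized weights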
| 371 |
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 336 | 0 |
"""simple docstring"""
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int] , max_sum: int ) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums )
    create_state_space_tree(nums , max_sum , num_index , path , result , remaining_nums_sum )
    return result
def create_state_space_tree(nums , max_sum , num_index , path , result , remaining_nums_sum , ):
    if sum(path ) > max_sum or (remaining_nums_sum + sum(path )) < max_sum:
        return
    if sum(path ) == max_sum:
        result.append(path )
        return
    for index in range(num_index , len(nums ) ):
        create_state_space_tree(
            nums , max_sum , index + 1 , [*path, nums[index]] , result , remaining_nums_sum - nums[index] , )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
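# Alternative sketch (added for illustration): for small inputs the same subsets
# can be enumerated with bitmasks instead of backtracking, trading pruning for
# simplicity at O(2^n * n) cost.
def sum_of_subsets_bitmask(nums: list[int] , max_sum: int ) -> list[list[int]]:
    subsets = []
    for mask in range(1 << len(nums ) ):
        subset = [nums[i] for i in range(len(nums ) ) if mask >> i & 1]
        if sum(subset ) == max_sum:
            subsets.append(subset )
    return subsets
assert sorted(map(sorted , sum_of_subsets_bitmask(nums , max_sum ) )) == sorted(map(sorted , result ))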
| 350 |
"""simple docstring"""
def generate_large_matrix():
    return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid(grid):
    assert all(row == sorted(row , reverse=True ) for row in grid )
    assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def find_negative_index(array):
    left = 0
    right = len(array ) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array )
def count_negatives_binary_search(grid):
    total = 0
    bound = len(grid[0] )
    for i in range(len(grid ) ):
        bound = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(grid ) * len(grid[0] )) - total
def count_negatives_brute_force(grid):
    return len([number for row in grid for number in row if number < 0] )
def count_negatives_brute_force_with_break(grid):
    total = 0
    for row in grid:
        for i, number in enumerate(row ):
            if number < 0:
                total += len(row ) - i
                break
    return total
def benchmark():
    from timeit import timeit
    print("""Running benchmarks""" )
    setup = (
        """from __main__ import count_negatives_binary_search, """
        """count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
    )
    for func in (
        "count_negatives_binary_search", # took 0.7727 seconds
        "count_negatives_brute_force_with_break", # took 4.6505 seconds
        "count_negatives_brute_force", # took 12.8160 seconds
    ):
        time = timeit(F"""{func}(grid=grid)""" , setup=setup , number=500 )
        print(F"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
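# Alternative sketch (added for comparison): since rows and columns are both
# sorted in non-increasing order, a single staircase walk from the bottom-left
# corner counts the negatives in O(m + n), beating the per-row binary search.
def count_negatives_staircase(grid):
    total = 0
    row = len(grid ) - 1
    col = 0
    while row >= 0 and col < len(grid[0] ):
        if grid[row][col] < 0:
            # everything to the right in this row is also negative
            total += len(grid[0] ) - col
            row -= 1
        else:
            col += 1
    return total
assert count_negatives_staircase([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]) == 8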
| 336 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester :
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=2 ,UpperCAmelCase_=32 ,UpperCAmelCase_=16 ,UpperCAmelCase_=3 ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_=32 ,UpperCAmelCase_=4 ,UpperCAmelCase_=[0, 1, 2, 3] ,UpperCAmelCase_=4 ,UpperCAmelCase_=37 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=3 ,UpperCAmelCase_=[1, 3_84, 24, 24] ,UpperCAmelCase_=True ,UpperCAmelCase_=None ,):
_lowercase : Dict = parent
_lowercase : List[str] = batch_size
_lowercase : Optional[Any] = image_size
_lowercase : Tuple = patch_size
_lowercase : Optional[int] = num_channels
_lowercase : Optional[int] = is_training
_lowercase : Dict = use_labels
_lowercase : int = hidden_size
_lowercase : Tuple = num_hidden_layers
_lowercase : Dict = backbone_out_indices
_lowercase : Tuple = num_attention_heads
_lowercase : List[Any] = intermediate_size
_lowercase : Optional[Any] = hidden_act
_lowercase : Union[str, Any] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : List[Any] = initializer_range
_lowercase : str = num_labels
_lowercase : Optional[int] = backbone_featmap_shape
_lowercase : Tuple = scope
_lowercase : int = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
_lowercase : Optional[int] = (image_size // patch_size) ** 2
_lowercase : str = num_patches + 1
def lowerCamelCase__ ( self ):
_lowercase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase : Union[str, Any] = None
if self.use_labels:
_lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
_lowercase : Any = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self ):
_lowercase : Any = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 1_92, 3_84, 7_68],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,backbone_out_indices=self.backbone_out_indices ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=UpperCAmelCase_ ,initializer_range=self.initializer_range ,is_hybrid=self.is_hybrid ,backbone_config=UpperCAmelCase_ ,backbone_featmap_shape=self.backbone_featmap_shape ,)
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Optional[int] = DPTModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowercase : List[str] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : str = self.num_labels
_lowercase : str = DPTForDepthEstimation(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowercase : int = model(UpperCAmelCase_ )
self.parent.assertEqual(result.predicted_depth.shape ,(self.batch_size, self.image_size, self.image_size) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Tuple = self.num_labels
_lowercase : Optional[Any] = DPTForSemanticSegmentation(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_lowercase : int = model(UpperCAmelCase_ ,labels=UpperCAmelCase_ )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size, self.image_size) )
def lowerCamelCase__ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : Tuple = (
{
"depth-estimation": DPTForDepthEstimation,
"feature-extraction": DPTModel,
"image-segmentation": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : int = False
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = DPTModelTester(self )
        _lowercase : Tuple = ConfigTester(self ,config_class=DPTConfig ,has_text_modality=False ,hidden_size=37 )
def lowerCamelCase__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def lowerCamelCase__ ( self ):
pass
def lowerCamelCase__ ( self ):
_lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : List[str] = model_class(UpperCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_lowercase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ ,nn.Linear ) )
def lowerCamelCase__ ( self ):
_lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : int = model_class(UpperCAmelCase_ )
_lowercase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : List[str] = [*signature.parameters.keys()]
_lowercase : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : Optional[int] = True
            if model_class in get_values(MODEL_MAPPING ):
continue
_lowercase : Tuple = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.train()
_lowercase : Dict = self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ,return_labels=UpperCAmelCase_ )
_lowercase : Optional[int] = model(**UpperCAmelCase_ ).loss
loss.backward()
def lowerCamelCase__ ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : str = False
_lowercase : Any = True
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
continue
_lowercase : List[str] = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.gradient_checkpointing_enable()
model.train()
_lowercase : Optional[int] = self._prepare_for_class(UpperCAmelCase_ ,UpperCAmelCase_ ,return_labels=UpperCAmelCase_ )
_lowercase : Optional[Any] = model(**UpperCAmelCase_ ).loss
loss.backward()
def lowerCamelCase__ ( self ):
_lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : str = _config_zero_init(UpperCAmelCase_ )
for model_class in self.all_model_classes:
_lowercase : Optional[int] = model_class(config=UpperCAmelCase_ )
# Skip the check for the backbone
_lowercase : Dict = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_lowercase : Optional[Any] = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() ,[0.0, 1.0] ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self ):
pass
@slow
def lowerCamelCase__ ( self ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_lowercase : Dict = DPTModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : Dict = """add"""
with self.assertRaises(UpperCAmelCase_ ):
_lowercase : Tuple = DPTForDepthEstimation(UpperCAmelCase_ )
def __SCREAMING_SNAKE_CASE ( ):
_lowercase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
_lowercase : Optional[int] = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(UpperCAmelCase_ )
_lowercase : Tuple = prepare_img()
_lowercase : Dict = image_processor(images=UpperCAmelCase_ ,return_tensors="""pt""" ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
_lowercase : List[str] = model(**UpperCAmelCase_ )
_lowercase : Dict = outputs.predicted_depth
# verify the predicted depth
_lowercase : Tuple = torch.Size((1, 3_84, 3_84) )
self.assertEqual(predicted_depth.shape ,UpperCAmelCase_ )
_lowercase : Any = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00 ,UpperCAmelCase_ ,atol=1E-4 ) )
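# Hedged end-to-end sketch (added; mirrors the integration test above): DPT depth
# maps are usually upsampled back to the input resolution before use.
# with torch.no_grad():
#     depth = model(**image_processor(images=image, return_tensors="pt")).predicted_depth
# depth = torch.nn.functional.interpolate(
#     depth.unsqueeze(1), size=image.size[::-1], mode="bicubic", align_corners=False
# ).squeeze(1)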
| 351 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def add_newline_to_end_of_each_sentence(x ):
    x = re.sub("""<n>""" , """""" , x ) # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x ) )
| 336 | 0 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCamelCase ( ProcessorMixin ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ["image_processor", "tokenizer"]
SCREAMING_SNAKE_CASE_ : List[Any] = "AutoImageProcessor"
SCREAMING_SNAKE_CASE_ : str = "AutoTokenizer"
def __init__( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,**UpperCAmelCase_ ):
_lowercase : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" ,snake_case_ ,)
_lowercase : Dict = kwargs.pop("""feature_extractor""" )
_lowercase : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(snake_case_ ,snake_case_ )
_lowercase : Dict = self.image_processor
_lowercase : str = False
def __call__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*snake_case_ ,**snake_case_ )
_lowercase : Optional[Any] = kwargs.pop("""images""" ,snake_case_ )
_lowercase : List[str] = kwargs.pop("""text""" ,snake_case_ )
if len(snake_case_ ) > 0:
_lowercase : Any = args[0]
_lowercase : Tuple = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
_lowercase : int = self.image_processor(snake_case_ ,*snake_case_ ,**snake_case_ )
if text is not None:
_lowercase : str = self.tokenizer(snake_case_ ,**snake_case_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
_lowercase : Any = encodings['''input_ids''']
return inputs
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return self.tokenizer.batch_decode(*snake_case_ ,**snake_case_ )
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return self.tokenizer.decode(*snake_case_ ,**snake_case_ )
@contextmanager
def lowerCamelCase__ ( self ):
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
_lowercase : Union[str, Any] = True
_lowercase : Optional[int] = self.tokenizer
yield
_lowercase : Optional[Any] = self.image_processor
_lowercase : Optional[Any] = False
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=False ,UpperCAmelCase_=None ):
if added_vocab is None:
_lowercase : Any = self.tokenizer.get_added_vocab()
_lowercase : Optional[Any] = {}
while tokens:
_lowercase : Any = re.search(R"""<s_(.*?)>""" ,snake_case_ ,re.IGNORECASE )
if start_token is None:
break
_lowercase : List[Any] = start_token.group(1 )
_lowercase : str = re.search(Rf"""</s_{key}>""" ,snake_case_ ,re.IGNORECASE )
_lowercase : Any = start_token.group()
if end_token is None:
_lowercase : Optional[int] = tokens.replace(snake_case_ ,"""""" )
else:
_lowercase : List[Any] = end_token.group()
_lowercase : Tuple = re.escape(snake_case_ )
_lowercase : Union[str, Any] = re.escape(snake_case_ )
_lowercase : Optional[int] = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" ,snake_case_ ,re.IGNORECASE )
if content is not None:
_lowercase : Tuple = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
_lowercase : int = self.tokenajson(snake_case_ ,is_inner_value=snake_case_ ,added_vocab=snake_case_ )
if value:
if len(snake_case_ ) == 1:
_lowercase : str = value[0]
_lowercase : Any = value
else: # leaf nodes
_lowercase : Optional[int] = []
for leaf in content.split(R"""<sep/>""" ):
_lowercase : Optional[int] = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
_lowercase : Dict = leaf[1:-2] # for categorical special tokens
output[key].append(snake_case_ )
if len(output[key] ) == 1:
_lowercase : Tuple = output[key][0]
_lowercase : Union[str, Any] = tokens[tokens.find(snake_case_ ) + len(snake_case_ ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] ,is_inner_value=snake_case_ ,added_vocab=snake_case_ )
if len(snake_case_ ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def lowerCamelCase__ ( self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,snake_case_ ,)
return self.image_processor_class
@property
def lowerCamelCase__ ( self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,snake_case_ ,)
return self.image_processor
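# Toy sketch (added, not part of the processor API): the token-to-JSON parsing
# above turns Donut-style tag sequences into nested dicts; a stripped-down
# version of the same idea for a flat, well-formed sequence looks like this.
def _toy_tokenajson(tokens ):
    # every <s_key>value</s_key> pair becomes one dict entry
    return {m.group(1 ): m.group(2 ).strip() for m in re.finditer(R"<s_(.*?)>(.*?)</s_\1>" ,tokens )}
assert _toy_tokenajson("<s_menu>latte</s_menu><s_price>3.50</s_price>" ) == {"menu": "latte", "price": "3.50"}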
| 352 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps , max_beta=0.9_9_9 , alpha_transform_type="cosine" , ):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -1_2.0 )
    else:
        raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
class UpperCamelCase ( SchedulerMixin , ConfigMixin ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = [e.name for e in KarrasDiffusionSchedulers]
SCREAMING_SNAKE_CASE_ : str = 2
@register_to_config
def __init__( self ,UpperCAmelCase_ = 10_00 ,UpperCAmelCase_ = 0.00085 ,UpperCAmelCase_ = 0.012 ,UpperCAmelCase_ = "linear" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = "epsilon" ,UpperCAmelCase_ = "linspace" ,UpperCAmelCase_ = 0 ,):
if trained_betas is not None:
_lowercase : str = torch.tensor(UpperCAmelCase_ ,dtype=torch.floataa )
elif beta_schedule == "linear":
_lowercase : Optional[Any] = torch.linspace(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowercase : Any = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,UpperCAmelCase_ ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowercase : Any = betas_for_alpha_bar(UpperCAmelCase_ )
else:
raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" )
_lowercase : Tuple = 1.0 - self.betas
_lowercase : Dict = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ):
if schedule_timesteps is None:
_lowercase : Optional[int] = self.timesteps
_lowercase : Union[str, Any] = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_lowercase : Optional[Any] = 1 if len(UpperCAmelCase_ ) > 1 else 0
else:
_lowercase : Dict = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_ ) else timestep
_lowercase : List[str] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCamelCase__ ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
_lowercase : str = self.index_for_timestep(UpperCAmelCase_ )
if self.state_in_first_order:
_lowercase : Optional[Any] = self.sigmas[step_index]
else:
_lowercase : Dict = self.sigmas_interpol[step_index]
_lowercase : Optional[int] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,):
_lowercase : List[str] = num_inference_steps
_lowercase : Dict = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_lowercase : Dict = np.linspace(0 ,num_train_timesteps - 1 ,UpperCAmelCase_ ,dtype=UpperCAmelCase_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_lowercase : Union[str, Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowercase : str = (np.arange(0 ,UpperCAmelCase_ ) * step_ratio).round()[::-1].copy().astype(UpperCAmelCase_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_lowercase : str = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowercase : Optional[int] = (np.arange(UpperCAmelCase_ ,0 ,-step_ratio )).round().copy().astype(UpperCAmelCase_ )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
_lowercase : Union[str, Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_lowercase : Optional[Any] = torch.from_numpy(np.log(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
_lowercase : List[str] = np.interp(UpperCAmelCase_ ,np.arange(0 ,len(UpperCAmelCase_ ) ) ,UpperCAmelCase_ )
_lowercase : int = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_lowercase : Any = torch.from_numpy(UpperCAmelCase_ ).to(device=UpperCAmelCase_ )
# interpolate sigmas
_lowercase : List[str] = sigmas.log().lerp(sigmas.roll(1 ).log() ,0.5 ).exp()
_lowercase : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_lowercase : Tuple = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(UpperCAmelCase_ ).startswith("""mps""" ):
# mps does not support float64
_lowercase : Tuple = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ ,dtype=torch.floataa )
else:
_lowercase : str = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ )
# interpolate timesteps
_lowercase : int = self.sigma_to_t(UpperCAmelCase_ ).to(UpperCAmelCase_ ,dtype=timesteps.dtype )
_lowercase : Union[str, Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) ,dim=-1 ).flatten()
_lowercase : str = torch.cat([timesteps[:1], interleaved_timesteps] )
_lowercase : List[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_lowercase : Optional[Any] = defaultdict(UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
# get log sigma
_lowercase : Optional[Any] = sigma.log()
# get distribution
_lowercase : Optional[int] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_lowercase : Tuple = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_lowercase : List[Any] = low_idx + 1
_lowercase : int = self.log_sigmas[low_idx]
_lowercase : Any = self.log_sigmas[high_idx]
# interpolate sigmas
_lowercase : Any = (low - log_sigma) / (low - high)
_lowercase : Dict = w.clamp(0 ,1 )
# transform interpolation to time range
_lowercase : List[str] = (1 - w) * low_idx + w * high_idx
_lowercase : Optional[int] = t.view(sigma.shape )
return t
@property
def lowerCamelCase__ ( self ):
return self.sample is None
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = True ,):
_lowercase : Optional[int] = self.index_for_timestep(UpperCAmelCase_ )
# advance index counter by 1
_lowercase : str = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_lowercase : Any = self.sigmas[step_index]
_lowercase : Any = self.sigmas_interpol[step_index + 1]
_lowercase : Tuple = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_lowercase : Union[str, Any] = self.sigmas[step_index - 1]
_lowercase : int = self.sigmas_interpol[step_index]
_lowercase : Tuple = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_lowercase : Any = 0
_lowercase : int = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_lowercase : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_interpol
_lowercase : Optional[Any] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_lowercase : str = sigma_hat if self.state_in_first_order else sigma_interpol
_lowercase : List[str] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_lowercase : List[str] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_lowercase : Any = sigma_interpol - sigma_hat
# store for 2nd order step
_lowercase : List[Any] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_lowercase : Optional[Any] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_lowercase : Optional[Any] = sigma_next - sigma_hat
_lowercase : Any = self.sample
_lowercase : Optional[int] = None
_lowercase : str = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_lowercase : int = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(UpperCAmelCase_ ):
# mps does not support float64
_lowercase : str = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
_lowercase : Any = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
_lowercase : List[Any] = self.timesteps.to(original_samples.device )
_lowercase : Union[str, Any] = timesteps.to(original_samples.device )
_lowercase : List[Any] = [self.index_for_timestep(UpperCAmelCase_ ,UpperCAmelCase_ ) for t in timesteps]
_lowercase : Optional[Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_lowercase : List[Any] = sigma.unsqueeze(-1 )
_lowercase : int = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
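# Quick numeric check (added): the cosine schedule above produces betas that grow
# toward `max_beta`, so the cumulative alphas decay from ~1 toward 0.
if __name__ == "__main__":
    betas = betas_for_alpha_bar(10 )
    alphas_cumprod = torch.cumprod(1.0 - betas ,dim=0 )
    print(betas[:3] ,alphas_cumprod[-1] )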
| 336 | 0 |
"""simple docstring"""
import os
from pathlib import Path
def write_model_card(model_card_dir , src_lang , tgt_lang , model_name ):
    texts = {
"""en""": """Machine learning is great, isn\'t it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, nicht wahr?""",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"""wmt16-en-de-dist-12-1""": [2_8.3, 2_7.5_2],
"""wmt16-en-de-dist-6-1""": [2_7.4, 2_7.1_1],
"""wmt16-en-de-12-1""": [2_6.9, 2_5.7_5],
}
    pair = F"""{src_lang}-{tgt_lang}"""
    readme = F"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
    model_card_dir.mkdir(parents=True , exist_ok=True )
    path = os.path.join(model_card_dir , """README.md""" )
    print(F"""Generating {path}""" )
    with open(path , """w""" , encoding="""utf-8""" ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / """model_cards"""
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / """allenai""" / model_name
write_model_card(model_card_dir, src_lang="""en""", tgt_lang="""de""", model_name=model_name)
| 353 |
"""simple docstring"""
import pprint
import requests
API_ENDPOINT_URL = """https://zenquotes.io/api"""
def quote_of_the_day():
    return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def random_quotes():
    return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
| 336 | 0 |
"""simple docstring"""
import numpy as np
def power_iteration(input_matrix: np.ndarray , vector: np.ndarray , error_tol: float = 1E-1_2 , max_iterations: int = 100 , ):
    assert np.shape(input_matrix )[0] == np.shape(input_matrix )[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix )[0] == np.shape(vector )[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix ) == np.iscomplexobj(vector )
    is_complex = np.iscomplexobj(input_matrix )
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix , input_matrix.conj().T )
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1E1_2
    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix , vector )
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w )
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h , np.dot(input_matrix , vector ) )
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous ) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_ )
    return lambda_, vector
def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
    real_vector = np.array([41, 4, 20] )
    complex_input_matrix = real_input_matrix.astype(np.complex128 )
    imag_matrix = np.triu(1j * complex_input_matrix , 1 )
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20] ).astype(np.complex128 )
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix , vector )
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix )
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector ) - np.abs(eigen_vector_max ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
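# Tiny standalone check (added): for diag(2, 1) the dominant eigenpair is known,
# so power iteration should return ~2.0 and a vector aligned with e_1:
#     eig, vec = power_iteration(np.array([[2.0, 0.0], [0.0, 1.0]]), np.array([1.0, 1.0]))
#     assert abs(eig - 2.0) < 1e-6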
| 354 |
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict( TypedDict ):
    """simple docstring"""
    bwt_string: str
    idx_original_string: int
def all_rotations(s ):
    if not isinstance(s , str ):
        raise TypeError("""The parameter s type must be str.""" )
    return [s[i:] + s[:i] for i in range(len(s ) )]
def bwt_transform(s ):
    if not isinstance(s , str ):
        raise TypeError("""The parameter s type must be str.""" )
    if not s:
        raise ValueError("""The parameter s must not be empty.""" )
    rotations = all_rotations(s )
    rotations.sort() # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(s ),
    }
    return response
def reverse_bwt(bwt_string , idx_original_string ):
    if not isinstance(bwt_string , str ):
        raise TypeError("""The parameter bwt_string type must be str.""" )
    if not bwt_string:
        raise ValueError("""The parameter bwt_string must not be empty.""" )
    try:
        idx_original_string = int(idx_original_string )
    except ValueError:
        raise TypeError(
            """The parameter idx_original_string type must be int or passive"""
            """ of cast to int.""" )
    if idx_original_string < 0:
        raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
    if idx_original_string >= len(bwt_string ):
        raise ValueError(
            """The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )
    ordered_rotations = [""""""] * len(bwt_string )
    for _ in range(len(bwt_string ) ):
        for i in range(len(bwt_string ) ):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    entry_msg = """Provide a string that I will generate its BWT transform: """
    s = input(entry_msg).strip()
    result = bwt_transform(s)
print(
F'Burrows Wheeler transform for string \'{s}\' results '
F'in \'{result["bwt_string"]}\''
)
    original_string = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
F'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
F'we get original string \'{original_string}\''
)
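# Faster inverse sketch (added): the reconstruction above re-sorts the rotation
# table on every pass (O(n^2 log n)); LF-mapping inverts the transform with a
# single stable sort, i.e. O(n log n).
def reverse_bwt_lf(bwt_string: str , idx_original_string: int ) -> str:
    # stable sort of positions by character yields the first-column mapping
    first_col = sorted(range(len(bwt_string ) ) ,key=lambda i: (bwt_string[i], i) )
    out = []
    row = idx_original_string
    for _ in range(len(bwt_string ) ):
        row = first_col[row]
        out.append(bwt_string[row] )
    return "".join(out )
assert reverse_bwt_lf("nnbaaa" ,3 ) == reverse_bwt("nnbaaa" ,3 ) == "banana"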
| 336 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase ( metaclass=DummyObject ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ['''torch''', '''scipy''']
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch""", """scipy"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch""", """scipy"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch""", """scipy"""] )
| 355 |
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def __SCREAMING_SNAKE_CASE ( ):
_lowercase : Dict = [randint(-1000 , 1000 ) for i in range(10 )]
_lowercase : Tuple = randint(-5000 , 5000 )
return (arr, r)
UpperCAmelCase: int = make_dataset()
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
for triplet in permutations(__UpperCAmelCase , 3 ):
if sum(__UpperCAmelCase ) == target:
return tuple(sorted(__UpperCAmelCase ) )
return (0, 0, 0)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
arr.sort()
_lowercase : Optional[Any] = len(__UpperCAmelCase )
for i in range(n - 1 ):
_lowercase , _lowercase : str = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def __SCREAMING_SNAKE_CASE ( ):
_lowercase : Tuple = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
_lowercase : Union[str, Any] = """
triplet_sum1(*dataset)
"""
_lowercase : Union[str, Any] = """
triplet_sum2(*dataset)
"""
_lowercase : Dict = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 )
_lowercase : Any = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 )
return (min(__UpperCAmelCase ), min(__UpperCAmelCase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase: Any = solution_times()
print(F'The time for naive implementation is {times[0]}.')
print(F'The time for optimized implementation is {times[1]}.')
| 336 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DiTPipeline
SCREAMING_SNAKE_CASE_ : Optional[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
SCREAMING_SNAKE_CASE_ : List[str] = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
SCREAMING_SNAKE_CASE_ : List[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ : Tuple = False
def lowerCamelCase__ ( self ):
torch.manual_seed(0 )
_lowercase : Union[str, Any] = TransformeraDModel(
sample_size=16 ,num_layers=2 ,patch_size=4 ,attention_head_dim=8 ,num_attention_heads=2 ,in_channels=4 ,out_channels=8 ,attention_bias=__lowerCamelCase ,activation_fn="""gelu-approximate""" ,num_embeds_ada_norm=10_00 ,norm_type="""ada_norm_zero""" ,norm_elementwise_affine=__lowerCamelCase ,)
_lowercase : Any = AutoencoderKL()
_lowercase : Optional[Any] = DDIMScheduler()
_lowercase : Optional[Any] = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
return components
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=0 ):
if str(__lowerCamelCase ).startswith("""mps""" ):
_lowercase : str = torch.manual_seed(__lowerCamelCase )
else:
_lowercase : Dict = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
_lowercase : Any = {
'''class_labels''': [1],
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def lowerCamelCase__ ( self ):
_lowercase : Dict = '''cpu'''
_lowercase : List[Any] = self.get_dummy_components()
_lowercase : Tuple = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
_lowercase : List[str] = self.get_dummy_inputs(__lowerCamelCase )
_lowercase : Union[str, Any] = pipe(**__lowerCamelCase ).images
_lowercase : Union[str, Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape ,(1, 16, 16, 3) )
_lowercase : Dict = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
_lowercase : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCamelCase ,1E-3 )
def lowerCamelCase__ ( self ):
self._test_inference_batch_single_identical(relax_max_difference=__lowerCamelCase ,expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,)
def lowerCamelCase__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self ):
_lowercase : Any = torch.manual_seed(0 )
_lowercase : Optional[Any] = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_lowercase : List[str] = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
_lowercase : str = pipe.get_label_ids(__lowerCamelCase )
_lowercase : List[str] = pipe(__lowerCamelCase ,generator=__lowerCamelCase ,num_inference_steps=40 ,output_type="""np""" ).images
for word, image in zip(__lowerCamelCase ,__lowerCamelCase ):
_lowercase : Optional[Any] = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-2
def lowerCamelCase__ ( self ):
_lowercase : str = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_lowercase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_lowercase : Dict = ['''vase''', '''umbrella''']
_lowercase : Optional[Any] = pipe.get_label_ids(__lowerCamelCase )
_lowercase : Tuple = torch.manual_seed(0 )
_lowercase : Tuple = pipe(__lowerCamelCase ,generator=__lowerCamelCase ,num_inference_steps=25 ,output_type="""np""" ).images
for word, image in zip(__lowerCamelCase ,__lowerCamelCase ):
_lowercase : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-1
| 356 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class UpperCamelCase ( ProcessorMixin ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ["image_processor", "tokenizer"]
SCREAMING_SNAKE_CASE_ : Optional[int] = "BlipImageProcessor"
SCREAMING_SNAKE_CASE_ : Tuple = "AutoTokenizer"
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
super().__init__(UpperCAmelCase_ ,UpperCAmelCase_ )
# add QFormer tokenizer
_lowercase : Optional[int] = qformer_tokenizer
def __call__( self ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = True ,UpperCAmelCase_ = False ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = 0 ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
if images is None and text is None:
raise ValueError("""You have to specify at least images or text.""" )
_lowercase : List[Any] = BatchFeature()
if text is not None:
_lowercase : List[str] = self.tokenizer(
text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
encoding.update(UpperCAmelCase_ )
_lowercase : Dict = self.qformer_tokenizer(
text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
_lowercase : str = qformer_text_encoding.pop("""input_ids""" )
_lowercase : int = qformer_text_encoding.pop("""attention_mask""" )
if images is not None:
_lowercase : Optional[Any] = self.image_processor(UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ )
encoding.update(UpperCAmelCase_ )
return encoding
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return self.tokenizer.batch_decode(*UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return self.tokenizer.decode(*UpperCAmelCase_ ,**UpperCAmelCase_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.tokenizer.model_input_names
_lowercase : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
if os.path.isfile(UpperCAmelCase_ ):
raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(UpperCAmelCase_ ,exist_ok=UpperCAmelCase_ )
_lowercase : Any = os.path.join(UpperCAmelCase_ ,"""qformer_tokenizer""" )
self.qformer_tokenizer.save_pretrained(UpperCAmelCase_ )
return super().save_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
_lowercase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCAmelCase_ ,subfolder="""qformer_tokenizer""" )
_lowercase : Any = cls._get_arguments_from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
args.append(UpperCAmelCase_ )
return cls(*UpperCAmelCase_ )
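# Hedged usage sketch (added): in the released `transformers` library this class
# corresponds to `InstructBlipProcessor`; a typical round trip would be:
# from transformers import InstructBlipProcessor
# processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
# processor.save_pretrained("./ckpt")  # also writes the qformer_tokenizer subfolder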
| 336 | 0 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ : str = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
SCREAMING_SNAKE_CASE_ : Tuple = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
SCREAMING_SNAKE_CASE_ : int = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : List[Any] = pipeline(
task="""text-classification""" ,model="""hf-internal-testing/tiny-random-distilbert""" ,framework="""pt""" )
_lowercase : Optional[int] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_A ) ,[{"""label""": """LABEL_0""", """score""": 0.504}] )
_lowercase : Optional[int] = text_classifier("""This is great !""" ,top_k=2 )
self.assertEqual(
nested_simplify(_A ) ,[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] )
_lowercase : Dict = text_classifier(["""This is great !""", """This is bad"""] ,top_k=2 )
self.assertEqual(
nested_simplify(_A ) ,[
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
] ,)
_lowercase : List[str] = text_classifier("""This is great !""" ,top_k=1 )
self.assertEqual(nested_simplify(_A ) ,[{"""label""": """LABEL_0""", """score""": 0.504}] )
# Legacy behavior
_lowercase : str = text_classifier("""This is great !""" ,return_all_scores=False )
self.assertEqual(nested_simplify(_A ) ,[{"""label""": """LABEL_0""", """score""": 0.504}] )
_lowercase : List[Any] = text_classifier("""This is great !""" ,return_all_scores=True )
self.assertEqual(
nested_simplify(_A ) ,[[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] )
_lowercase : Optional[Any] = text_classifier(["""This is great !""", """Something else"""] ,return_all_scores=True )
self.assertEqual(
nested_simplify(_A ) ,[
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
] ,)
_lowercase : Any = text_classifier(["""This is great !""", """Something else"""] ,return_all_scores=False )
self.assertEqual(
nested_simplify(_A ) ,[
{"""label""": """LABEL_0""", """score""": 0.504},
{"""label""": """LABEL_0""", """score""": 0.504},
] ,)
@require_torch
def lowerCamelCase__ ( self ):
import torch
_lowercase : Union[str, Any] = pipeline(
task="""text-classification""" ,model="""hf-internal-testing/tiny-random-distilbert""" ,framework="""pt""" ,device=torch.device("""cpu""" ) ,)
_lowercase : List[str] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_A ) ,[{"""label""": """LABEL_0""", """score""": 0.504}] )
@require_tf
def lowerCamelCase__ ( self ):
_lowercase : Tuple = pipeline(
task="""text-classification""" ,model="""hf-internal-testing/tiny-random-distilbert""" ,framework="""tf""" )
_lowercase : Dict = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_A ) ,[{"""label""": """LABEL_0""", """score""": 0.504}] )
@slow
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : str = pipeline("""text-classification""" )
_lowercase : str = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_A ) ,[{"""label""": """POSITIVE""", """score""": 1.0}] )
_lowercase : Union[str, Any] = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(_A ) ,[{"""label""": """NEGATIVE""", """score""": 1.0}] )
_lowercase : Union[str, Any] = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(_A ) ,[{"""label""": """POSITIVE""", """score""": 0.988}] )
@slow
@require_tf
def lowerCamelCase__ ( self ):
_lowercase : Tuple = pipeline("""text-classification""" ,framework="""tf""" )
_lowercase : Dict = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_A ) ,[{"""label""": """POSITIVE""", """score""": 1.0}] )
_lowercase : str = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(_A ) ,[{"""label""": """NEGATIVE""", """score""": 1.0}] )
_lowercase : Optional[int] = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(_A ) ,[{"""label""": """POSITIVE""", """score""": 0.988}] )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : List[Any] = TextClassificationPipeline(model=_A ,tokenizer=_A )
return text_classifier, ["HuggingFace is in", "This is another test"]
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Tuple = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
_lowercase : str = """HuggingFace is in"""
_lowercase : Optional[Any] = text_classifier(_A )
self.assertEqual(nested_simplify(_A ) ,[{"""label""": ANY(_A ), """score""": ANY(_A )}] )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
_lowercase : List[str] = ["""HuggingFace is in """, """Paris is in France"""]
_lowercase : Any = text_classifier(_A )
self.assertEqual(
nested_simplify(_A ) ,[{"""label""": ANY(_A ), """score""": ANY(_A )}, {"""label""": ANY(_A ), """score""": ANY(_A )}] ,)
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
_lowercase : List[str] = text_classifier(_A ,top_k=_A )
_lowercase : Optional[Any] = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(_A ) ,[[{"""label""": ANY(_A ), """score""": ANY(_A )}] * N, [{"""label""": ANY(_A ), """score""": ANY(_A )}] * N] ,)
_lowercase : Union[str, Any] = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
_lowercase : Optional[Any] = text_classifier(_A )
self.assertEqual(
nested_simplify(_A ) ,{"""label""": ANY(_A ), """score""": ANY(_A )} ,)
self.assertTrue(outputs["""label"""] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
_lowercase : Optional[Any] = [["""HuggingFace is in """, """Paris is in France"""]]
with self.assertRaises(_A ):
text_classifier(_A )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
_lowercase : List[str] = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
self.assertEqual(
nested_simplify(_A ) ,[{"""label""": ANY(_A ), """score""": ANY(_A )}] ,)
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
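# Usage sketch of the behaviours asserted above, via the public pipeline API
# and the same tiny test checkpoint:
from transformers import pipeline

classifier = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
print(classifier("This is great !"))           # [{'label': 'LABEL_0', 'score': ...}]
print(classifier("This is great !", top_k=2))  # one dict per label, best score first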
| 357 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase: Tuple = logging.get_logger(__name__)
UpperCAmelCase: List[Any] = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = "trajectory_transformer"
SCREAMING_SNAKE_CASE_ : Optional[int] = ["past_key_values"]
SCREAMING_SNAKE_CASE_ : Tuple = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self ,UpperCAmelCase_=1_00 ,UpperCAmelCase_=5 ,UpperCAmelCase_=1 ,UpperCAmelCase_=1 ,UpperCAmelCase_=2_49 ,UpperCAmelCase_=6 ,UpperCAmelCase_=17 ,UpperCAmelCase_=25 ,UpperCAmelCase_=4 ,UpperCAmelCase_=4 ,UpperCAmelCase_=1_28 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.0006 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=1 ,UpperCAmelCase_=True ,UpperCAmelCase_=1 ,UpperCAmelCase_=5_02_56 ,UpperCAmelCase_=5_02_56 ,**UpperCAmelCase_ ,):
_lowercase : Dict = vocab_size
_lowercase : List[str] = action_weight
_lowercase : int = reward_weight
_lowercase : List[Any] = value_weight
_lowercase : List[str] = max_position_embeddings
_lowercase : Any = block_size
_lowercase : Any = action_dim
_lowercase : List[str] = observation_dim
_lowercase : Union[str, Any] = transition_dim
_lowercase : str = learning_rate
_lowercase : Tuple = n_layer
_lowercase : Optional[int] = n_head
_lowercase : List[str] = n_embd
_lowercase : List[str] = embd_pdrop
_lowercase : Optional[Any] = attn_pdrop
_lowercase : List[Any] = resid_pdrop
_lowercase : str = initializer_range
_lowercase : Optional[Any] = layer_norm_eps
_lowercase : List[Any] = kaiming_initializer_range
_lowercase : List[Any] = use_cache
super().__init__(pad_token_id=UpperCAmelCase_ ,bos_token_id=UpperCAmelCase_ ,eos_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ )
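# Instantiation sketch, assuming the public TrajectoryTransformerConfig class
# these hyperparameters belong to (deprecated in recent transformers releases
# but still importable):
from transformers import TrajectoryTransformerConfig

config = TrajectoryTransformerConfig(n_layer=4, n_head=4, n_embd=128)
print(config.hidden_size)  # 128 -- attribute_map routes "hidden_size" to n_embd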
| 336 | 0 |
from random import randint, random
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = 5 , ):
_lowercase : Union[str, Any] = [[-1] * number_of_cells] # Create a highway without any car
_lowercase : Optional[int] = 0
_lowercase : int = max(_A , 0 )
while i < number_of_cells:
_lowercase : Optional[Any] = (
randint(0 , _A ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : Dict = 0
_lowercase : Optional[int] = highway_now[car_index + 1 :]
for cell in range(len(_A ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(_A , -1 )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : Any = len(_A )
# Before calculations, the highway is empty
_lowercase : Dict = [-1] * number_of_cells
for car_index in range(_A ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
_lowercase : Tuple = min(highway_now[car_index] + 1 , _A )
# Number of empty cell before the next car
_lowercase : Optional[Any] = get_distance(_A , _A ) - 1
# We can't have the car causing an accident
_lowercase : List[Any] = min(next_highway[car_index] , _A )
if random() < probability:
# Randomly, a driver will slow down
_lowercase : List[str] = max(next_highway[car_index] - 1 , 0 )
return next_highway
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : Optional[Any] = len(highway[0] )
for i in range(_A ):
_lowercase : Optional[int] = update(highway[i] , _A , _A )
_lowercase : List[str] = [-1] * number_of_cells
for car_index in range(_A ):
_lowercase : Dict = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
_lowercase : List[Any] = (car_index + speed) % number_of_cells
# Commit the change of position
_lowercase : Optional[Any] = speed
highway.append(_A )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
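# Demo sketch of this Nagel-Schreckenberg-style cellular automaton, assuming
# the two top-level helpers keep their upstream names construct_highway(...)
# and simulate(...); arguments are positional, matching the signatures above:
road = construct_highway(24, 8, 1)   # 24 cells, a car every 8 cells, speed 1
history = simulate(road, 5, 0.0, 5)  # 5 steps, no random braking, max speed 5
for state in history:
    print("".join("." if cell == -1 else str(cell) for cell in state))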
| 358 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase: Any = logging.get_logger(__name__)
UpperCAmelCase: List[str] = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = "instructblip_vision_model"
def __init__( self ,UpperCAmelCase_=14_08 ,UpperCAmelCase_=61_44 ,UpperCAmelCase_=39 ,UpperCAmelCase_=16 ,UpperCAmelCase_=2_24 ,UpperCAmelCase_=14 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=1E-6 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=1E-10 ,UpperCAmelCase_=True ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ )
_lowercase : Optional[Any] = hidden_size
_lowercase : Tuple = intermediate_size
_lowercase : List[Any] = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[Any] = patch_size
_lowercase : Optional[Any] = image_size
_lowercase : Union[str, Any] = initializer_range
_lowercase : Optional[Any] = attention_dropout
_lowercase : List[Any] = layer_norm_eps
_lowercase : Optional[int] = hidden_act
_lowercase : Tuple = qkv_bias
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
_lowercase , _lowercase : List[Any] = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
_lowercase : int = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "instructblip_qformer"
def __init__( self ,UpperCAmelCase_=3_05_22 ,UpperCAmelCase_=7_68 ,UpperCAmelCase_=12 ,UpperCAmelCase_=12 ,UpperCAmelCase_=30_72 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=0 ,UpperCAmelCase_="absolute" ,UpperCAmelCase_=2 ,UpperCAmelCase_=14_08 ,**UpperCAmelCase_ ,):
super().__init__(pad_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : List[Any] = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : List[str] = num_attention_heads
_lowercase : Optional[Any] = hidden_act
_lowercase : int = intermediate_size
_lowercase : Union[str, Any] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : List[Any] = max_position_embeddings
_lowercase : Tuple = initializer_range
_lowercase : Optional[int] = layer_norm_eps
_lowercase : Any = position_embedding_type
_lowercase : Dict = cross_attention_frequency
_lowercase : Optional[Any] = encoder_hidden_size
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
_lowercase , _lowercase : Dict = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
_lowercase : str = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = "instructblip"
SCREAMING_SNAKE_CASE_ : List[str] = True
def __init__( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=32 ,**UpperCAmelCase_ ):
super().__init__(**UpperCAmelCase_ )
if vision_config is None:
_lowercase : str = {}
logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" )
if qformer_config is None:
_lowercase : Any = {}
logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" )
if text_config is None:
_lowercase : Optional[int] = {}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
_lowercase : int = InstructBlipVisionConfig(**UpperCAmelCase_ )
_lowercase : Optional[int] = InstructBlipQFormerConfig(**UpperCAmelCase_ )
_lowercase : Dict = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
_lowercase : str = CONFIG_MAPPING[text_model_type](**UpperCAmelCase_ )
_lowercase : str = self.text_config.tie_word_embeddings
_lowercase : Union[str, Any] = self.text_config.is_encoder_decoder
_lowercase : List[str] = num_query_tokens
_lowercase : List[str] = self.vision_config.hidden_size
_lowercase : Dict = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowercase : Union[str, Any] = 1.0
_lowercase : Dict = 0.02
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ,):
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**UpperCAmelCase_ ,)
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = copy.deepcopy(self.__dict__ )
_lowercase : int = self.vision_config.to_dict()
_lowercase : Any = self.qformer_config.to_dict()
_lowercase : Any = self.text_config.to_dict()
_lowercase : Optional[int] = self.__class__.model_type
return output
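# Composition sketch, assuming the public names these three classes correspond
# to (InstructBlipVisionConfig, InstructBlipQFormerConfig, InstructBlipConfig)
# and the classmethod defined above (from_vision_qformer_text_configs in the
# public API):
from transformers import InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig

config = InstructBlipConfig.from_vision_qformer_text_configs(
    InstructBlipVisionConfig(), InstructBlipQFormerConfig(), CONFIG_MAPPING["opt"]()
)
print(config.num_query_tokens)  # 32 by default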
| 336 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
UpperCAmelCase: Any = "\nHuman: <<task>>\n\nAssistant: "
UpperCAmelCase: List[Any] = "huggingface-tools/default-prompts"
UpperCAmelCase: List[str] = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="run" ):
if prompt_or_repo_id is None:
_lowercase : List[Any] = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("""\\s""" , lowerCAmelCase__ ) is not None:
return prompt_or_repo_id
_lowercase : int = cached_file(
lowerCAmelCase__ , PROMPT_FILES[mode] , repo_type="""dataset""" , user_agent={"""agent""": agent_name} )
with open(lowerCAmelCase__ , """r""" , encoding="""utf-8""" ) as f:
return f.read()
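# Usage sketch, assuming the helper keeps its upstream name
# download_prompt(prompt_or_repo_id, agent_name, mode="run"); the agent name
# below is a placeholder:
template = download_prompt(None, "MyAgent", "chat")  # fetches the default chat template
inline = download_prompt("Translate <<task>> to French", "MyAgent")
assert inline.startswith("Translate")  # contains whitespace -> returned verbatim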
| 359 |
"""simple docstring"""
import cva
import numpy as np
class UpperCamelCase :
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
if k in (0.04, 0.06):
_lowercase : Optional[Any] = k
_lowercase : Optional[Any] = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self ):
return str(self.k )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : List[str] = cva.imread(UpperCAmelCase_ ,0 )
_lowercase , _lowercase : Dict = img.shape
_lowercase : list[list[int]] = []
_lowercase : int = img.copy()
_lowercase : List[str] = cva.cvtColor(UpperCAmelCase_ ,cva.COLOR_GRAY2RGB )
_lowercase , _lowercase : Optional[Any] = np.gradient(UpperCAmelCase_ )
_lowercase : Optional[int] = dx**2
_lowercase : Optional[Any] = dy**2
_lowercase : Optional[Any] = dx * dy
_lowercase : List[str] = self.k  # use the k validated in __init__ rather than a hard-coded 0.04
_lowercase : Optional[Any] = self.window_size // 2
for y in range(UpperCAmelCase_ ,h - offset ):
for x in range(UpperCAmelCase_ ,w - offset ):
_lowercase : Optional[Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : Dict = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : Union[str, Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : int = (wxx * wyy) - (wxy**2)
_lowercase : Union[str, Any] = wxx + wyy
_lowercase : Union[str, Any] = det - k * (trace**2)
# Threshold on the corner response; tune to keep more or fewer corners
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) ,0 )
color_img.itemset((y, x, 1) ,0 )
color_img.itemset((y, x, 2) ,2_55 )
return color_img, corner_list
if __name__ == "__main__":
UpperCAmelCase: Optional[int] = HarrisCorner(0.04, 3)
UpperCAmelCase , UpperCAmelCase: List[Any] = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
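# The response computed in the nested loop above is the standard Harris
# measure r = det(M) - k * trace(M)**2 over the windowed second-moment matrix
# M. A cross-check sketch with OpenCV's built-in (it uses Sobel derivatives,
# so raw magnitudes differ and the threshold needs retuning):
import cv2
import numpy as np

gray = cv2.imread("path_to_image", 0).astype(np.float32)
response = cv2.cornerHarris(gray, blockSize=3, ksize=3, k=0.04)
corners = np.argwhere(response > 0.01 * response.max())  # common relative threshold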
| 336 | 0 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase ( __a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = """ClapFeatureExtractor"""
SCREAMING_SNAKE_CASE_ : List[str] = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
super().__init__(a__ ,a__ )
def __call__( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,**UpperCAmelCase_ ):
_lowercase : List[str] = kwargs.pop("""sampling_rate""" ,a__ )
if text is None and audios is None:
raise ValueError("""You have to specify either text or audios. Both cannot be none.""" )
if text is not None:
_lowercase : str = self.tokenizer(a__ ,return_tensors=a__ ,**a__ )
if audios is not None:
_lowercase : Optional[Any] = self.feature_extractor(
a__ ,sampling_rate=a__ ,return_tensors=a__ ,**a__ )
if text is not None and audios is not None:
_lowercase : Tuple = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a__ ) ,tensor_type=a__ )
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return self.tokenizer.batch_decode(*a__ ,**a__ )
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return self.tokenizer.decode(*a__ ,**a__ )
@property
def lowerCamelCase__ ( self ):
_lowercase : Dict = self.tokenizer.model_input_names
_lowercase : List[Any] = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
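# Usage sketch, assuming the public ClapProcessor this class corresponds to;
# CLAP checkpoints expect 48 kHz mono audio:
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
audio = np.random.randn(48_000)  # one second of placeholder audio
inputs = processor(text=["a dog barking"], audios=audio, sampling_rate=48_000, return_tensors="pt")
print(inputs.keys())  # input_ids, attention_mask, input_features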
| 360 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase ( snake_case , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = LEDTokenizer
SCREAMING_SNAKE_CASE_ : List[str] = LEDTokenizerFast
SCREAMING_SNAKE_CASE_ : List[str] = True
def lowerCamelCase__ ( self ):
super().setUp()
_lowercase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_lowercase : List[Any] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) )
_lowercase : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowercase : Dict = {"""unk_token""": """<unk>"""}
_lowercase : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
_lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return "lower newer", "lower newer"
@cached_property
def lowerCamelCase__ ( self ):
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def lowerCamelCase__ ( self ):
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_lowercase : Any = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Tuple = tokenizer(UpperCAmelCase_ ,max_length=len(UpperCAmelCase_ ) ,padding=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
_lowercase : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Dict = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIn("""input_ids""" ,UpperCAmelCase_ )
self.assertIn("""attention_mask""" ,UpperCAmelCase_ )
self.assertNotIn("""labels""" ,UpperCAmelCase_ )
self.assertNotIn("""decoder_attention_mask""" ,UpperCAmelCase_ )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Dict = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Tuple = tokenizer(text_target=UpperCAmelCase_ ,max_length=32 ,padding="""max_length""" ,return_tensors="""pt""" )
self.assertEqual(32 ,targets["""input_ids"""].shape[1] )
@require_torch
def lowerCamelCase__ ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : List[Any] = tokenizer(
["""I am a small frog""" * 10_24, """I am a small frog"""] ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual(batch.input_ids.shape ,(2, 51_22) )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : List[Any] = ["""A long paragraph for summarization."""]
_lowercase : Dict = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Dict = tokenizer(UpperCAmelCase_ ,return_tensors="""pt""" )
_lowercase : List[str] = tokenizer(text_target=UpperCAmelCase_ ,return_tensors="""pt""" )
_lowercase : Union[str, Any] = inputs["""input_ids"""]
_lowercase : List[str] = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCamelCase__ ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : str = ["""Summary of the text.""", """Another summary."""]
_lowercase : Optional[int] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_lowercase : Any = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ )
_lowercase : str = [[0] * len(x) for x in encoded_output["""input_ids"""]]
_lowercase : Optional[int] = tokenizer.pad(UpperCAmelCase_ )
self.assertSequenceEqual(outputs["""global_attention_mask"""] ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
pass
def lowerCamelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowercase : int = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Optional[int] = self.tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Dict = """A, <mask> AllenNLP sentence."""
_lowercase : List[Any] = tokenizer_r.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ )
_lowercase : Any = tokenizer_p.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) ,sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) ,sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) ,)
_lowercase : str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
_lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
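# Sketch of the global-attention padding behaviour exercised in the last test,
# using the public LEDTokenizer imported at the top of this file:
tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")
enc = tok(["Summary of the text.", "Another summary."], padding=False)
enc["global_attention_mask"] = [[0] * len(ids) for ids in enc["input_ids"]]
padded = tok.pad(enc)
print(padded["global_attention_mask"])  # shorter rows are padded with -1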
| 336 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase: Optional[Any] = {
"configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
"feature_extraction_whisper": ["WhisperFeatureExtractor"],
"processing_whisper": ["WhisperProcessor"],
"tokenization_whisper": ["WhisperTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: List[str] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: List[Any] = [
"WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"WhisperForConditionalGeneration",
"WhisperModel",
"WhisperPreTrainedModel",
"WhisperForAudioClassification",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: Tuple = [
"TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWhisperForConditionalGeneration",
"TFWhisperModel",
"TFWhisperPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: List[str] = [
"FlaxWhisperForConditionalGeneration",
"FlaxWhisperModel",
"FlaxWhisperPreTrainedModel",
"FlaxWhisperForAudioClassification",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
UpperCAmelCase: int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
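# With the _LazyModule above, `from transformers.models import whisper` stays
# cheap: the torch/tf/flax submodules are imported only when an attribute is
# first accessed (a sketch of the intent):
#
#     from transformers.models import whisper
#     model_cls = whisper.WhisperModel  # triggers the real modeling import here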
| 361 |
"""simple docstring"""
import argparse
from collections import defaultdict
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : str = F"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : Any = f.readlines()
_lowercase : Optional[int] = F"""class {class_name}("""
_lowercase : List[str] = F"""{4 * " "}def {test_name}("""
_lowercase : List[Any] = F"""{8 * " "}{correct_line.split()[0]}"""
_lowercase : int = F"""{16 * " "}{correct_line.split()[0]}"""
_lowercase : str = False
_lowercase : Optional[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Any = False
_lowercase : int = 0
_lowercase : Tuple = 0
_lowercase : Union[str, Any] = []
for line in lines:
if line.startswith(__UpperCAmelCase ):
_lowercase : List[str] = True
elif in_class and line.startswith(__UpperCAmelCase ):
_lowercase : str = True
elif in_class and in_func and (line.startswith(__UpperCAmelCase ) or line.startswith(__UpperCAmelCase )):
_lowercase : Union[str, Any] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowercase : Optional[int] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowercase : Optional[Any] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"""{spaces * " "}{correct_line}""" )
_lowercase : Union[str, Any] = False
else:
new_lines.append(__UpperCAmelCase )
with open(__UpperCAmelCase , """w""" ) as f:
for line in new_lines:
f.write(__UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=None ):
if fail is not None:
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : Dict = {l.strip() for l in f.readlines()}
else:
_lowercase : int = None
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : int = f.readlines()
_lowercase : int = defaultdict(int )  # counts how many times each test id has been patched
for line in correct_lines:
_lowercase , _lowercase , _lowercase , _lowercase : int = line.split(""";""" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase: List[Any] = argparse.ArgumentParser()
parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""")
parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None)
UpperCAmelCase: Any = parser.parse_args()
main(args.correct_filename, args.fail_filename)
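# Input format sketch: each line of --correct_filename holds one
# semicolon-separated record, matching the split(";") above:
#
#     <test file>;<test class>;<test name>;<corrected source line>
#
# A hypothetical record (the actual values depend on your own test run):
#
#     tests/models/bert/test_modeling_bert.py;BertModelIntegrationTest;test_inference;expected_slice = torch.tensor([[0.1, 0.2, 0.3]])
#
# --fail_filename, if given, restricts rewriting to the listed
# "file::class::test" ids, the node-id format pytest prints for failures.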
| 336 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase: Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase: List[str] = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = "informer"
SCREAMING_SNAKE_CASE_ : Dict = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = "student_t" ,UpperCAmelCase_ = "nll" ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = None ,UpperCAmelCase_ = "mean" ,UpperCAmelCase_ = 0 ,UpperCAmelCase_ = 0 ,UpperCAmelCase_ = 0 ,UpperCAmelCase_ = 0 ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = 64 ,UpperCAmelCase_ = 32 ,UpperCAmelCase_ = 32 ,UpperCAmelCase_ = 2 ,UpperCAmelCase_ = 2 ,UpperCAmelCase_ = 2 ,UpperCAmelCase_ = 2 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = "gelu" ,UpperCAmelCase_ = 0.05 ,UpperCAmelCase_ = 0.1 ,UpperCAmelCase_ = 0.1 ,UpperCAmelCase_ = 0.1 ,UpperCAmelCase_ = 0.1 ,UpperCAmelCase_ = 1_00 ,UpperCAmelCase_ = 0.02 ,UpperCAmelCase_=True ,UpperCAmelCase_ = "prob" ,UpperCAmelCase_ = 5 ,UpperCAmelCase_ = True ,**UpperCAmelCase_ ,):
_lowercase : Optional[Any] = prediction_length
_lowercase : List[str] = context_length or prediction_length
_lowercase : Union[str, Any] = distribution_output
_lowercase : List[Any] = loss
_lowercase : Optional[Any] = input_size
_lowercase : str = num_time_features
_lowercase : Union[str, Any] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
_lowercase : Union[str, Any] = scaling
_lowercase : Tuple = num_dynamic_real_features
_lowercase : List[Any] = num_static_real_features
_lowercase : Tuple = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
_lowercase : Optional[Any] = cardinality
else:
_lowercase : str = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
_lowercase : int = embedding_dimension
else:
_lowercase : Union[str, Any] = [min(50 ,(cat + 1) // 2 ) for cat in self.cardinality]
_lowercase : List[Any] = num_parallel_samples
# Transformer architecture configuration
_lowercase : Optional[int] = input_size * len(self.lags_sequence ) + self._number_of_features
_lowercase : Dict = d_model
_lowercase : Any = encoder_attention_heads
_lowercase : Union[str, Any] = decoder_attention_heads
_lowercase : Tuple = encoder_ffn_dim
_lowercase : List[Any] = decoder_ffn_dim
_lowercase : List[str] = encoder_layers
_lowercase : Union[str, Any] = decoder_layers
_lowercase : Dict = dropout
_lowercase : Dict = attention_dropout
_lowercase : Union[str, Any] = activation_dropout
_lowercase : Dict = encoder_layerdrop
_lowercase : Optional[int] = decoder_layerdrop
_lowercase : Any = activation_function
_lowercase : Optional[int] = init_std
_lowercase : Any = use_cache
# Informer
_lowercase : Optional[Any] = attention_type
_lowercase : Dict = sampling_factor
_lowercase : Tuple = distil
super().__init__(is_encoder_decoder=__a ,**__a )
@property
def lowerCamelCase__ ( self ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
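# Instantiation sketch, assuming the public InformerConfig these fields
# belong to; the values below are illustrative:
from transformers import InformerConfig

config = InformerConfig(
    prediction_length=24,
    context_length=48,
    num_static_categorical_features=1,
    cardinality=[366],
    embedding_dimension=[2],
)
print(config.d_model, config.attention_type)  # 64 "prob"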
| 362 |
"""simple docstring"""
UpperCAmelCase: List[str] = """0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
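# Typical use of these exports (a sketch; build model/optimizer/dataloader as
# usual, then let Accelerator place them on the available devices):
#
#     accelerator = Accelerator()
#     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#     for batch in dataloader:
#         loss = model(batch).loss
#         accelerator.backward(loss)  # replaces loss.backward()
#         optimizer.step()
#         optimizer.zero_grad()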
| 336 | 0 |
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
UpperCAmelCase: Tuple = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""")
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , ):
output_path.parent.mkdir(parents=UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
UpperCAmelCase__ , UpperCAmelCase__ , f=output_path.as_posix() , input_names=UpperCAmelCase__ , output_names=UpperCAmelCase__ , dynamic_axes=UpperCAmelCase__ , do_constant_folding=UpperCAmelCase__ , use_external_data_format=UpperCAmelCase__ , enable_onnx_checker=UpperCAmelCase__ , opset_version=UpperCAmelCase__ , )
else:
export(
UpperCAmelCase__ , UpperCAmelCase__ , f=output_path.as_posix() , input_names=UpperCAmelCase__ , output_names=UpperCAmelCase__ , dynamic_axes=UpperCAmelCase__ , do_constant_folding=UpperCAmelCase__ , opset_version=UpperCAmelCase__ , )
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False ):
_lowercase : List[Any] = torch.float16 if fpaa else torch.float32
if fpaa and torch.cuda.is_available():
_lowercase : Optional[Any] = """cuda"""
elif fpaa and not torch.cuda.is_available():
raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
else:
_lowercase : Any = """cpu"""
_lowercase : List[Any] = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ , torch_dtype=UpperCAmelCase__ ).to(UpperCAmelCase__ )
_lowercase : Any = Path(UpperCAmelCase__ )
# TEXT ENCODER
_lowercase : int = pipeline.text_encoder.config.max_position_embeddings
_lowercase : str = pipeline.text_encoder.config.hidden_size
_lowercase : List[str] = pipeline.tokenizer(
"""A sample prompt""" , padding="""max_length""" , max_length=pipeline.tokenizer.model_max_length , truncation=UpperCAmelCase__ , return_tensors="""pt""" , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=UpperCAmelCase__ , dtype=torch.intaa )) , output_path=output_path / """text_encoder""" / """model.onnx""" , ordered_input_names=["""input_ids"""] , output_names=["""last_hidden_state""", """pooler_output"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """sequence"""},
} , opset=UpperCAmelCase__ , )
del pipeline.text_encoder
# UNET
_lowercase : Dict = pipeline.unet.config.in_channels
_lowercase : Union[str, Any] = pipeline.unet.config.sample_size
_lowercase : Optional[int] = output_path / """unet""" / """model.onnx"""
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ ),
torch.randn(2 ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ ),
torch.randn(2 , UpperCAmelCase__ , UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ ),
False,
) , output_path=UpperCAmelCase__ , ordered_input_names=["""sample""", """timestep""", """encoder_hidden_states""", """return_dict"""] , output_names=["""out_sample"""] , dynamic_axes={
"""sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
"""timestep""": {0: """batch"""},
"""encoder_hidden_states""": {0: """batch""", 1: """sequence"""},
} , opset=UpperCAmelCase__ , use_external_data_format=UpperCAmelCase__ , )
_lowercase : Any = str(unet_path.absolute().as_posix() )
_lowercase : List[str] = os.path.dirname(UpperCAmelCase__ )
_lowercase : List[Any] = onnx.load(UpperCAmelCase__ )
# clean up existing tensor files
shutil.rmtree(UpperCAmelCase__ )
os.mkdir(UpperCAmelCase__ )
# collate external tensor files into one
onnx.save_model(
UpperCAmelCase__ , UpperCAmelCase__ , save_as_external_data=UpperCAmelCase__ , all_tensors_to_one_file=UpperCAmelCase__ , location="""weights.pb""" , convert_attribute=UpperCAmelCase__ , )
del pipeline.unet
# VAE ENCODER
_lowercase : Optional[Any] = pipeline.vae
_lowercase : Any = vae_encoder.config.in_channels
_lowercase : Tuple = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
_lowercase : int = lambda sample , return_dict : vae_encoder.encode(sample , return_dict )[0].sample()
onnx_export(
UpperCAmelCase__ , model_args=(
torch.randn(1 , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ ),
False,
) , output_path=output_path / """vae_encoder""" / """model.onnx""" , ordered_input_names=["""sample""", """return_dict"""] , output_names=["""latent_sample"""] , dynamic_axes={
"""sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=UpperCAmelCase__ , )
# VAE DECODER
_lowercase : List[Any] = pipeline.vae
_lowercase : Optional[int] = vae_decoder.config.latent_channels
_lowercase : Optional[int] = vae_decoder.config.out_channels
# forward only through the decoder part
_lowercase : List[str] = vae_encoder.decode  # vae_encoder and vae_decoder alias the same pipeline.vae, so this binds its decoder
onnx_export(
UpperCAmelCase__ , model_args=(
torch.randn(1 , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ ),
False,
) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
"""latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=UpperCAmelCase__ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
_lowercase : Dict = pipeline.safety_checker
_lowercase : Tuple = safety_checker.config.vision_config.num_channels
_lowercase : Union[str, Any] = safety_checker.config.vision_config.image_size
_lowercase : str = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ ),
torch.randn(1 , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ ),
) , output_path=output_path / """safety_checker""" / """model.onnx""" , ordered_input_names=["""clip_input""", """images"""] , output_names=["""out_images""", """has_nsfw_concepts"""] , dynamic_axes={
"""clip_input""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
"""images""": {0: """batch""", 1: """height""", 2: """width""", 3: """channels"""},
} , opset=UpperCAmelCase__ , )
del pipeline.safety_checker
_lowercase : Optional[Any] = OnnxRuntimeModel.from_pretrained(output_path / """safety_checker""" )
_lowercase : str = pipeline.feature_extractor
else:
_lowercase : int = None
_lowercase : int = None
_lowercase : List[str] = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_encoder""" ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_decoder""" ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / """text_encoder""" ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / """unet""" ) , scheduler=pipeline.scheduler , safety_checker=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(UpperCAmelCase__ )
print("""ONNX pipeline saved to""" , UpperCAmelCase__ )
del pipeline
del onnx_pipeline
_lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(UpperCAmelCase__ , provider="""CPUExecutionProvider""" )
print("""ONNX pipeline is loadable""" )
if __name__ == "__main__":
UpperCAmelCase: Any = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=14,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
UpperCAmelCase: Dict = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fp16)
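# Example invocation (the script filename, checkpoint and output path are
# placeholders):
#
#     python convert_stable_diffusion_to_onnx.py \
#         --model_path runwayml/stable-diffusion-v1-5 \
#         --output_path ./sd_onnx --opset 14
#
# or programmatically: convert_models("runwayml/stable-diffusion-v1-5", "./sd_onnx", 14, False)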
| 363 |
"""simple docstring"""
UpperCAmelCase: str = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
UpperCAmelCase: Any = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCAmelCase: int = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 336 | 0 |
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class UpperCamelCase ( snake_case__ ):
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_=10_24 ,UpperCAmelCase_=10_24 ,UpperCAmelCase_=3.6 ):
_lowercase : List[str] = tokenizer
_lowercase : List[Any] = tokenizer.bos_token_id
_lowercase : List[str] = dataset
_lowercase : List[Any] = seq_length
_lowercase : Any = seq_length * chars_per_token * num_of_sequences
def __iter__( self ):
_lowercase : List[str] = iter(self.dataset )
_lowercase : Dict = True
while more_examples:
_lowercase , _lowercase : Optional[int] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(_A )["""content"""] )
buffer_len += len(buffer[-1] )
except StopIteration:
_lowercase : Optional[int] = False
break
_lowercase : int = tokenizer(_A ,truncation=_A )["""input_ids"""]
_lowercase : List[Any] = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 ,len(_A ) ,self.seq_length ):
_lowercase : List[Any] = all_token_ids[i : i + self.seq_length]
if len(_A ) == self.seq_length:
yield torch.tensor(_A )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : int = {"""streaming""": True}
_lowercase : Optional[Any] = load_dataset(args.dataset_name , split="""train""" , **SCREAMING_SNAKE_CASE_ )
_lowercase : str = ConstantLengthDataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , seq_length=args.seq_length )
_lowercase : Union[str, Any] = DataLoader(SCREAMING_SNAKE_CASE_ , batch_size=args.batch_size )
return eval_dataloader
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
model.eval()
_lowercase : str = []
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
with torch.no_grad():
_lowercase : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
_lowercase : Tuple = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(SCREAMING_SNAKE_CASE_ ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
_lowercase : str = torch.mean(torch.cat(SCREAMING_SNAKE_CASE_ ) )
try:
_lowercase : Any = torch.exp(SCREAMING_SNAKE_CASE_ )
except OverflowError:
_lowercase : Any = float("""inf""" )
return loss.item(), perplexity.item()
# Setup Accelerator
UpperCAmelCase: Dict = Accelerator()
# Parse configuration
UpperCAmelCase: Tuple = HfArgumentParser(EvaluationArguments)
UpperCAmelCase: Dict = parser.parse_args()
set_seed(args.seed)
# Logging
UpperCAmelCase: Optional[int] = logging.getLogger(__name__)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
# Load model and tokenizer
UpperCAmelCase: Union[str, Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
UpperCAmelCase: int = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
UpperCAmelCase: Optional[Any] = create_dataloader(args)
# Prepare everything with our `accelerator`.
UpperCAmelCase , UpperCAmelCase: int = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("""Evaluating and saving model after training""")
UpperCAmelCase , UpperCAmelCase: Dict = evaluate(args)
logger.info(F'loss/eval: {eval_loss}, perplexity: {perplexity}')
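# For reference: the reported metric is perplexity = exp(mean cross-entropy),
# computed above with torch.exp over the gathered per-batch losses. A mean
# loss of 2.3 nats, for example, gives exp(2.3) ~= 9.97 -- roughly the
# uncertainty of a 10-way uniform choice per token.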
| 364 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
UpperCAmelCase: List[Any] = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
def constraint_to_multiple_of(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=0 , __UpperCAmelCase=None ):
_lowercase : Union[str, Any] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
_lowercase : str = math.floor(val / multiple ) * multiple
if x < min_val:
_lowercase : Dict = math.ceil(val / multiple ) * multiple
return x
_lowercase : List[str] = (output_size, output_size) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else output_size
_lowercase , _lowercase : List[Any] = get_image_size(__UpperCAmelCase )
_lowercase , _lowercase : Union[str, Any] = output_size
# determine new height and width
_lowercase : str = output_height / input_height
_lowercase : List[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
_lowercase : str = scale_width
else:
# fit height
_lowercase : int = scale_height
_lowercase : List[Any] = constraint_to_multiple_of(scale_height * input_height , multiple=__UpperCAmelCase )
_lowercase : Optional[Any] = constraint_to_multiple_of(scale_width * input_width , multiple=__UpperCAmelCase )
return (new_height, new_width)
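# Worked example for the helper above: a 480x640 (h, w) input resized toward
# (384, 384) with keep_aspect_ratio=True and multiple=32 keeps the scale
# factor closest to 1 (0.8, from the height) and snaps both sides to
# multiples of 32:
#
#     get_resize_output_image_size(img, (384, 384), True, 32) == (384, 512)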
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["pixel_values"]
def __init__( self ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = PILImageResampling.BILINEAR ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = 1 / 2_55 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ )
_lowercase : List[Any] = size if size is not None else {"""height""": 3_84, """width""": 3_84}
_lowercase : str = get_size_dict(UpperCAmelCase_ )
_lowercase : Tuple = do_resize
_lowercase : Any = size
_lowercase : List[Any] = keep_aspect_ratio
_lowercase : Any = ensure_multiple_of
_lowercase : str = resample
_lowercase : Optional[Any] = do_rescale
_lowercase : List[Any] = rescale_factor
_lowercase : Union[str, Any] = do_normalize
_lowercase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowercase : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = PILImageResampling.BICUBIC ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
_lowercase : Optional[Any] = get_size_dict(UpperCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_lowercase : Dict = get_resize_output_image_size(
UpperCAmelCase_ ,output_size=(size["""height"""], size["""width"""]) ,keep_aspect_ratio=UpperCAmelCase_ ,multiple=UpperCAmelCase_ ,)
return resize(UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
return rescale(UpperCAmelCase_ ,scale=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
return normalize(UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = ChannelDimension.FIRST ,**UpperCAmelCase_ ,):
_lowercase : Any = do_resize if do_resize is not None else self.do_resize
_lowercase : List[str] = size if size is not None else self.size
_lowercase : int = get_size_dict(UpperCAmelCase_ )
_lowercase : Any = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_lowercase : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_lowercase : List[str] = resample if resample is not None else self.resample
_lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
_lowercase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase : str = do_normalize if do_normalize is not None else self.do_normalize
_lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean
_lowercase : int = image_std if image_std is not None else self.image_std
_lowercase : Union[str, Any] = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_lowercase : int = [to_numpy_array(UpperCAmelCase_ ) for image in images]
if do_resize:
_lowercase : Union[str, Any] = [self.resize(image=UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ) for image in images]
if do_rescale:
_lowercase : int = [self.rescale(image=UpperCAmelCase_ ,scale=UpperCAmelCase_ ) for image in images]
if do_normalize:
_lowercase : str = [self.normalize(image=UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ) for image in images]
_lowercase : Tuple = [to_channel_dimension_format(UpperCAmelCase_ ,UpperCAmelCase_ ) for image in images]
_lowercase : int = {"""pixel_values""": images}
return BatchFeature(data=UpperCAmelCase_ ,tensor_type=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ):
_lowercase : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(UpperCAmelCase_ ):
_lowercase : Tuple = target_sizes.numpy()
_lowercase : Optional[Any] = []
for idx in range(len(UpperCAmelCase_ ) ):
_lowercase : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode="""bilinear""" ,align_corners=UpperCAmelCase_ )
_lowercase : Optional[int] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCAmelCase_ )
else:
_lowercase : Union[str, Any] = logits.argmax(dim=1 )
_lowercase : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
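# Hedged usage sketch (added): `model`, `image_processor` and `image` are
# assumptions, and the method name follows the upstream DPT API rather than the
# obfuscated name used in the listing above.
#
#     inputs = image_processor(images=image, return_tensors="pt")
#     outputs = model(**inputs)
#     seg_maps = image_processor.post_process_semantic_segmentation(
#         outputs, target_sizes=[image.size[::-1]]
#     )  # one (height, width) label map per input image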
| 336 | 0 |
"""simple docstring"""
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    return np.array_equal(matrix , matrix.conjugate().T )
def rayleigh_quotient(a: np.ndarray , v: np.ndarray ) -> Any:
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), f"""{a} is not hermitian."""
    print(rayleigh_quotient(a , v ) )
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), f"""{a} is not hermitian."""
    assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
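    # Hedged extra check (added, illustrative): Rayleigh quotients of a
    # Hermitian matrix are real and bounded by the extreme eigenvalues, e.g.:
    #
    #     w = np.linalg.eigvalsh(np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]]))
    #     assert w.min() <= 3 <= w.max()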
| 365 |
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abca = [0, 25, 50]
    abcb = [25, 50, 75]
    young = fuzz.membership.trimf(X, abca)
    middle_aged = fuzz.membership.trimf(X, abcb)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
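    # Minimal sketch of the two compositions named above (added; plain numpy,
    # assuming relation matrices r: X -> Y and s: Y -> Z):
    def max_min_composition(r, s):
        return np.max(np.minimum(r[:, :, None], s[None, :, :]), axis=1)
    def max_product_composition(r, s):
        return np.max(r[:, :, None] * s[None, :, :], axis=1)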
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 336 | 0 |
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCAmelCase: str = logging.get_logger(__name__)
class VisionEncoderDecoderConfig ( PretrainedConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = "vision-encoder-decoder"
SCREAMING_SNAKE_CASE_ : Dict = True
def __init__( self ,**UpperCAmelCase_ ):
super().__init__(**UpperCAmelCase_ )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f"""A configuraton of type {self.model_type} cannot be instantiated because """
f"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
_lowercase : List[str] = kwargs.pop("""encoder""" )
_lowercase : str = encoder_config.pop("""model_type""" )
_lowercase : Dict = kwargs.pop("""decoder""" )
_lowercase : List[str] = decoder_config.pop("""model_type""" )
_lowercase : Dict = AutoConfig.for_model(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : str = AutoConfig.for_model(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Any = True
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
_lowercase : Any = True
_lowercase : List[Any] = True
return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : List[Any] = copy.deepcopy(self.__dict__ )
_lowercase : Any = self.encoder.to_dict()
_lowercase : int = self.decoder.to_dict()
_lowercase : Union[str, Any] = self.__class__.model_type
return output
class VisionEncoderDecoderEncoderOnnxConfig ( OnnxConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = version.parse("1.11" )
@property
def lowerCamelCase__ ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCamelCase__ ( self ):
return 1E-4
@property
def lowerCamelCase__ ( self ):
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class VisionEncoderDecoderDecoderOnnxConfig ( OnnxConfig ):
"""simple docstring"""
@property
def lowerCamelCase__ ( self ):
_lowercase : Tuple = OrderedDict()
_lowercase : List[Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_lowercase : Any = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_lowercase : List[str] = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = -1 ,UpperCAmelCase_ = -1 ,UpperCAmelCase_ = False ,UpperCAmelCase_ = None ,):
import torch
_lowercase : Optional[Any] = OrderedDict()
_lowercase : Optional[Any] = super().generate_dummy_inputs(
UpperCAmelCase_ ,batch_size=UpperCAmelCase_ ,seq_length=UpperCAmelCase_ ,is_pair=UpperCAmelCase_ ,framework=UpperCAmelCase_ )
_lowercase , _lowercase : str = dummy_input["""input_ids"""].shape
_lowercase : Optional[int] = (batch, encoder_sequence, self._config.encoder_hidden_size)
_lowercase : Optional[int] = dummy_input.pop("""input_ids""" )
_lowercase : Dict = dummy_input.pop("""attention_mask""" )
_lowercase : Optional[Any] = torch.zeros(UpperCAmelCase_ )
return common_inputs
class VisionEncoderDecoderOnnxConfig ( OnnxConfig ):
"""simple docstring"""
@property
def lowerCamelCase__ ( self ):
pass
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return VisionEncoderDecoderEncoderOnnxConfig(UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = "default" ):
_lowercase : Optional[Any] = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(UpperCAmelCase_ ,UpperCAmelCase_ )
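# Hedged usage sketch (added): the concrete encoder/decoder choices are
# assumptions, and the classmethod name follows the upstream transformers API.
#
#     from transformers import ViTConfig, GPT2Config, VisionEncoderDecoderConfig
#     cfg = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), GPT2Config())
#     assert cfg.decoder.is_decoder and cfg.decoder.add_cross_attention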
| 366 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ):
_lowercase : str = tempfile.mkdtemp()
# fmt: off
_lowercase : List[Any] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_lowercase : Optional[int] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) )
_lowercase : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
_lowercase : Optional[int] = {"""unk_token""": """<unk>"""}
_lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
_lowercase : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
_lowercase : Dict = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
}
        _lowercase : List[Any] = os.path.join(self.tmpdirname ,IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp:
json.dump(UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self ):
        _lowercase : Optional[Any] = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uint8 )]
_lowercase : Tuple = [Image.fromarray(np.moveaxis(UpperCAmelCase_ ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.get_tokenizer()
_lowercase : List[Any] = self.get_rust_tokenizer()
_lowercase : List[Any] = self.get_image_processor()
_lowercase : List[str] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
_lowercase : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=UpperCAmelCase_ )
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
_lowercase : List[str] = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,UpperCAmelCase_ )
self.assertIsInstance(processor_fast.tokenizer ,UpperCAmelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,UpperCAmelCase_ )
self.assertIsInstance(processor_fast.image_processor ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : str = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowercase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
_lowercase : Optional[int] = self.get_image_processor(do_normalize=UpperCAmelCase_ ,padding_value=1.0 )
_lowercase : int = CLIPProcessor.from_pretrained(
self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=UpperCAmelCase_ ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,UpperCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : Optional[int] = self.get_tokenizer()
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : int = self.prepare_image_inputs()
_lowercase : str = image_processor(UpperCAmelCase_ ,return_tensors="""np""" )
_lowercase : int = processor(images=UpperCAmelCase_ ,return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : Optional[Any] = self.get_tokenizer()
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : List[Any] = """lower newer"""
_lowercase : Any = processor(text=UpperCAmelCase_ )
_lowercase : Union[str, Any] = tokenizer(UpperCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.get_image_processor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : str = """lower newer"""
_lowercase : List[Any] = self.prepare_image_inputs()
_lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def lowerCamelCase__ ( self ):
_lowercase : Dict = self.get_image_processor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowercase : int = processor.batch_decode(UpperCAmelCase_ )
_lowercase : Tuple = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : Optional[Any] = """lower newer"""
_lowercase : Any = self.prepare_image_inputs()
_lowercase : Optional[int] = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
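# Hedged usage sketch mirroring the tests above (the model id and `image` are
# assumptions):
#
#     from transformers import CLIPProcessor
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#     # -> input_ids, attention_mask and pixel_values, as asserted above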
| 336 | 0 |
"""simple docstring"""
import copy
import re
class TrialShortNamer :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = """hp"""
SCREAMING_SNAKE_CASE_ : Any = {}
SCREAMING_SNAKE_CASE_ : List[str] = None
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : List[str] = prefix
_lowercase : Optional[Any] = defaults
cls.build_naming_info()
@staticmethod
def lowerCamelCase__ ( UpperCAmelCase_ ,UpperCAmelCase_ ):
if len(__lowercase ) == 0:
return ""
_lowercase : List[str] = None
if any(char.isdigit() for char in word ):
raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 ,len(__lowercase ) + 1 ):
_lowercase : Dict = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
_lowercase : Dict = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(UpperCAmelCase_ ):
_lowercase : List[str] = ''''''
while integer != 0:
_lowercase : List[str] = chr(ord("""A""" ) + integer % 10 ) + s
integer //= 10
return s
_lowercase : str = 0
while True:
_lowercase : List[str] = word + '''#''' + int_to_alphabetic(__lowercase )
if sword in info["reverse_short_word"]:
continue
else:
_lowercase : List[Any] = sword
break
_lowercase : Any = short_word
_lowercase : int = word
return short_word
@staticmethod
def lowerCamelCase__ ( UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Any = param_name.split("""_""" )
_lowercase : Optional[int] = [TrialShortNamer.shortname_for_word(__lowercase ,__lowercase ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
_lowercase : Any = ['''''', '''_''']
for separator in separators:
_lowercase : List[Any] = separator.join(__lowercase )
if shortname not in info["reverse_short_param"]:
_lowercase : str = shortname
_lowercase : Tuple = param_name
return shortname
return param_name
@staticmethod
def lowerCamelCase__ ( UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : List[Any] = TrialShortNamer.shortname_for_key(__lowercase ,__lowercase )
_lowercase : int = short_name
_lowercase : List[Any] = param_name
@classmethod
def lowerCamelCase__ ( cls ):
if cls.NAMING_INFO is not None:
return
_lowercase : Optional[Any] = {
'''short_word''': {},
'''reverse_short_word''': {},
'''short_param''': {},
'''reverse_short_param''': {},
}
_lowercase : Dict = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(__lowercase ,__lowercase )
_lowercase : Any = info
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ):
cls.build_naming_info()
assert cls.PREFIX is not None
_lowercase : Optional[int] = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f"""You should provide a default value for the param name {k} with value {v}""" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
_lowercase : Tuple = cls.NAMING_INFO['''short_param'''][k]
if isinstance(__lowercase ,__lowercase ):
_lowercase : List[Any] = 1 if v else 0
_lowercase : Optional[int] = '''''' if isinstance(__lowercase ,(int, float) ) else '''-'''
_lowercase : Dict = f"""{key}{sep}{v}"""
name.append(__lowercase )
return "_".join(__lowercase )
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ):
_lowercase : List[str] = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
_lowercase : Dict = []
else:
_lowercase : List[Any] = repr.split("""_""" )
_lowercase : List[str] = {}
for value in values:
if "-" in value:
_lowercase : List[str] = value.split("""-""" )
else:
_lowercase : Union[str, Any] = re.sub("""[0-9.]""" ,"""""" ,__lowercase )
_lowercase : int = float(re.sub("""[^0-9.]""" ,"""""" ,__lowercase ) )
_lowercase : int = cls.NAMING_INFO['''reverse_short_param'''][p_k]
_lowercase : List[str] = p_v
for k in cls.DEFAULTS:
if k not in parameters:
_lowercase : str = cls.DEFAULTS[k]
return parameters
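# Hedged round-trip sketch (added): the method names follow the upstream
# TrialShortNamer API (shortname/parse_repr); the obfuscated listing above
# renames them, so this is illustrative only.
#
#     class RunNamer(TrialShortNamer):
#         PREFIX = "run"
#         DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}
#
#     name = RunNamer.shortname({"learning_rate": 1e-4, "batch_size": 32})
#     assert RunNamer.parse_repr(name)["batch_size"] == 32  # default filled back in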
| 367 |
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
UpperCAmelCase: str = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig ( datasets.BuilderConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[datasets.Features] = None
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , ):
import pyspark
def generate_fn():
_lowercase : List[Any] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
_lowercase : Optional[int] = df_with_partition_id.select("""*""" ).where(F"""part_id = {partition_id}""" ).drop("""part_id""" )
_lowercase : int = partition_df.collect()
_lowercase : Dict = 0
for row in rows:
yield F"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class SparkExamplesIterable ( _BaseExamplesIterable ):
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ,):
_lowercase : Union[str, Any] = df
_lowercase : List[str] = partition_order or range(self.df.rdd.getNumPartitions() )
_lowercase : Tuple = _generate_iterable_examples(self.df ,self.partition_order )
def __iter__( self ):
yield from self.generate_examples_fn()
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : List[str] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(UpperCAmelCase_ )
return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Union[str, Any] = self.split_shard_indices_by_worker(UpperCAmelCase_ ,UpperCAmelCase_ )
return SparkExamplesIterable(self.df ,partition_order=UpperCAmelCase_ )
@property
def lowerCamelCase__ ( self ):
return len(self.partition_order )
class Spark ( datasets.DatasetBuilder ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = SparkConfig
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
import pyspark
_lowercase : List[Any] = pyspark.sql.SparkSession.builder.getOrCreate()
_lowercase : List[Any] = df
_lowercase : int = working_dir
super().__init__(
cache_dir=UpperCAmelCase_ ,config_name=str(self.df.semanticHash() ) ,**UpperCAmelCase_ ,)
def lowerCamelCase__ ( self ):
# Returns the path of the created file.
def create_cache_and_write_probe(UpperCAmelCase_ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir ,exist_ok=UpperCAmelCase_ )
            _lowercase : Union[str, Any] = os.path.join(self._cache_dir ,"""fs_test""" + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(UpperCAmelCase_ ,"""a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" ,"""""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
_lowercase : List[str] = (
self._spark.sparkContext.parallelize(range(1 ) ,1 ).mapPartitions(UpperCAmelCase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def lowerCamelCase__ ( self ):
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
import pyspark
def get_arrow_batch_size(UpperCAmelCase_ ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
_lowercase : List[str] = self.df.count()
_lowercase : List[str] = df_num_rows if df_num_rows <= 1_00 else 1_00
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
_lowercase : Union[str, Any] = (
self.df.limit(UpperCAmelCase_ )
.repartition(1 )
.mapInArrow(UpperCAmelCase_ ,"""batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
_lowercase : List[Any] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
_lowercase : int = min(UpperCAmelCase_ ,int(approx_total_size / max_shard_size ) )
_lowercase : List[Any] = self.df.repartition(UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
import pyspark
_lowercase : Union[str, Any] = ParquetWriter if file_format == """parquet""" else ArrowWriter
_lowercase : List[Any] = os.path.join(self._working_dir ,os.path.basename(UpperCAmelCase_ ) ) if self._working_dir else fpath
_lowercase : Any = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
_lowercase : Union[str, Any] = self.config.features
_lowercase : Optional[int] = self._writer_batch_size
_lowercase : Optional[Any] = self._fs.storage_options
def write_arrow(UpperCAmelCase_ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
_lowercase : Any = pyspark.TaskContext().taskAttemptId()
_lowercase : List[str] = next(UpperCAmelCase_ ,UpperCAmelCase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
_lowercase : List[Any] = 0
_lowercase : int = writer_class(
features=UpperCAmelCase_ ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,)
_lowercase : Optional[int] = pa.Table.from_batches([first_batch] )
writer.write_table(UpperCAmelCase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
_lowercase , _lowercase : Optional[Any] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
shard_id += 1
_lowercase : Union[str, Any] = writer_class(
features=writer._features ,path=working_fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,writer_batch_size=UpperCAmelCase_ ,storage_options=UpperCAmelCase_ ,embed_local_files=UpperCAmelCase_ ,)
_lowercase : Dict = pa.Table.from_batches([batch] )
writer.write_table(UpperCAmelCase_ )
if writer._num_bytes > 0:
_lowercase , _lowercase : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,)
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(UpperCAmelCase_ ) ):
_lowercase : Dict = os.path.join(os.path.dirname(UpperCAmelCase_ ) ,os.path.basename(UpperCAmelCase_ ) )
shutil.move(UpperCAmelCase_ ,UpperCAmelCase_ )
_lowercase : List[str] = (
self.df.mapInArrow(UpperCAmelCase_ ,"""task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) ,pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) ,pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) ,pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) ,)
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = "arrow" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
self._validate_cache_dir()
_lowercase : Tuple = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(UpperCAmelCase_ )
_lowercase : Optional[int] = not is_remote_filesystem(self._fs )
_lowercase : Dict = os.path.join if is_local else posixpath.join
_lowercase : int = """-TTTTT-SSSSS-of-NNNNN"""
_lowercase : Optional[Any] = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
_lowercase : Dict = path_join(self._output_dir ,UpperCAmelCase_ )
_lowercase : List[Any] = 0
_lowercase : Optional[Any] = 0
_lowercase : int = 0
_lowercase : Any = []
_lowercase : Any = []
for task_id, content in self._prepare_split_single(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) : Tuple = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(UpperCAmelCase_ )
_lowercase : Optional[int] = total_num_examples
_lowercase : List[Any] = total_num_bytes
# should rename everything at the end
logger.debug(f"""Renaming {total_shards} shards.""" )
if total_shards > 1:
_lowercase : List[Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
_lowercase : Union[str, Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
rename(
UpperCAmelCase_ ,fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace("""TTTTT-SSSSS""" ,f"""{global_shard_id:05d}""" ).replace("""NNNNN""" ,f"""{total_shards:05d}""" ) ,)
_lowercase : Optional[Any] = []
_lowercase : List[str] = 0
for i in range(len(UpperCAmelCase_ ) ):
_lowercase , _lowercase : List[str] = task_id_and_num_shards[i]
for shard_id in range(UpperCAmelCase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(UpperCAmelCase_ ,len(UpperCAmelCase_ ) ).map(lambda UpperCAmelCase_ : _rename_shard(*UpperCAmelCase_ ) ).collect()
else:
# don't use any pattern
_lowercase : Tuple = 0
_lowercase : Optional[Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" ,f"""{shard_id:05d}""" ).replace("""TTTTT""" ,f"""{task_id:05d}""" ) ,fpath.replace(UpperCAmelCase_ ,"""""" ) ,)
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,):
return SparkExamplesIterable(self.df )
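# Hedged usage sketch (added; requires a live Spark session):
#
#     import datasets
#     from pyspark.sql import SparkSession
#     spark = SparkSession.builder.getOrCreate()
#     df = spark.createDataFrame([("a", 1), ("b", 2)], ["col_1", "col_2"])
#     ds = datasets.Dataset.from_spark(df)  # drives the Spark builder above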
| 336 | 0 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( sentence , ngram_size ):
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
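    # Illustrative check (added; not part of the original file):
    assert __SCREAMING_SNAKE_CASE("""hello""" , 3 ) == ["hel", "ell", "llo"]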
| 368 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase: Optional[int] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = XLNetTokenizer
SCREAMING_SNAKE_CASE_ : Dict = XLNetTokenizerFast
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
def lowerCamelCase__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
        _lowercase : Dict = XLNetTokenizer(UpperCAmelCase ,keep_accents=True )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = """<s>"""
_lowercase : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) ,UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<unk>""" )
self.assertEqual(vocab_keys[1] ,"""<s>""" )
self.assertEqual(vocab_keys[-1] ,"""<eod>""" )
self.assertEqual(len(UpperCAmelCase_ ) ,10_06 )
def lowerCamelCase__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size ,10_00 )
def lowerCamelCase__ ( self ):
        _lowercase : int = XLNetTokenizer(UpperCAmelCase ,keep_accents=True )
_lowercase : Union[str, Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCAmelCase_ ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) ,[2_85, 46, 10, 1_70, 3_82] )
_lowercase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
_lowercase : List[Any] = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ ,[8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
_lowercase : Optional[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
def lowerCamelCase__ ( self ):
        _lowercase : Dict = XLNetTokenizer(UpperCAmelCase ,do_lower_case=True )
_lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""▁he""", """ll""", """o"""] )
def lowerCamelCase__ ( self ):
        _lowercase : int = XLNetTokenizer(UpperCAmelCase ,do_lower_case=False )
_lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
@slow
def lowerCamelCase__ ( self ):
_lowercase : Dict = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
_lowercase : int = tokenizer.encode("""sequence builders""" ,add_special_tokens=UpperCAmelCase_ )
_lowercase : List[str] = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=UpperCAmelCase_ )
_lowercase : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
_lowercase : Dict = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ,UpperCAmelCase_ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def lowerCamelCase__ ( self ):
# fmt: off
_lowercase : Union[str, Any] = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ ,model_name="""xlnet-base-cased""" ,revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" ,)
| 336 | 0 |
"""simple docstring"""
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve :
    """simple docstring"""
    def __init__( self ,list_of_points ):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points ) - 1
    def basis_function( self ,t ):
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree ,i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values ) ,5 ) == 1
        return output_values
    def bezier_curve_function( self ,t ):
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t )
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def plot_curve( self ,step_size = 0.01 ):
        from matplotlib import pyplot as plt  # type: ignore
        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x ,to_plot_y ,color="""blue""" ,label="""Curve of Degree """ + str(self.degree ) ,)
        plt.scatter(x ,y ,color="""red""" ,label="""Control Points""" )
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
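    # Illustrative midpoint check (added): for the degree-1 curve the point at
    # t = 0.5 is the average of the two control points.
    assert BezierCurve([(1, 2), (3, 5)]).bezier_curve_function(0.5) == (2.0, 3.5)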
| 369 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( input_str ):
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
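    # Illustrative check (added; not part of the original file):
    assert __SCREAMING_SNAKE_CASE("""hello world example""" ) == "example world hello"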
| 336 | 0 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : List[Any] = tmp_path / "cache"
_lowercase : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowercase : Optional[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : Optional[Any] = tmp_path / "cache"
_lowercase : List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_lowercase : List[Any] = features.copy() if features else default_expected_features
_lowercase : Tuple = (
Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowercase : str = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : Tuple = tmp_path / "cache"
_lowercase : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_lowercase : Tuple = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_lowercase : Optional[Any] = parquet_path
elif issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_lowercase : Dict = [parquet_path]
_lowercase : List[Any] = tmp_path / "cache"
_lowercase : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_lowercase : Any = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=("train",) ):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for split in splits:
_lowercase : Dict = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : List[Any] = tmp_path / "cache"
_lowercase : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowercase : Tuple = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : str = tmp_path / "cache"
_lowercase : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_lowercase : Dict = features.copy() if features else default_expected_features
_lowercase : Dict = (
Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowercase : List[Any] = ParquetDatasetReader({"""train""": parquet_path} , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if split:
_lowercase : Optional[int] = {split: parquet_path}
else:
_lowercase : Dict = "train"
_lowercase : int = {"train": parquet_path, "test": parquet_path}
_lowercase : Any = tmp_path / "cache"
_lowercase : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_lowercase : Optional[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : List[Any] = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_lowercase : Union[str, Any] = pq.ParquetFile(tmp_path / """foo.parquet""" )
_lowercase : Optional[Any] = pf.read()
assert dataset.data.table == output_table
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : Optional[int] = str(shared_datadir / """test_image_rgb.jpg""" )
_lowercase : str = {"image": [image_path]}
_lowercase : List[Any] = Features({"""image""": Image()} )
_lowercase : Any = Dataset.from_dict(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE )
_lowercase : Dict = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_lowercase : Tuple = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
_lowercase : Union[str, Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=__SCREAMING_SNAKE_CASE ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
assert get_writer_batch_size(__SCREAMING_SNAKE_CASE ) == expected
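# Hedged end-to-end sketch mirroring the round trip tested above (the output
# path is illustrative):
#
#     ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#     ParquetDatasetWriter(ds, "out.parquet").write()
#     reloaded = Dataset.from_parquet("out.parquet")
#     assert reloaded.column_names == ["col_1", "col_2"]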
| 370 |
"""simple docstring"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(R"""#.*""" , """""" , line )  # remove comments
        if line:
            filtered_lines.append(line )
    full_str = """\n""".join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode("""utf-8""" )
    return sha256(full_bytes ).hexdigest()
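# Illustrative check (added): comment-only and blank lines are stripped before
# hashing, so both inputs below map to the same module hash.
assert _hash_python_lines(["x = 1", "# a comment", ""]) == _hash_python_lines(["x = 1"])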
# get importable module names and hash for caching
UpperCAmelCase: Tuple = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
UpperCAmelCase: Any = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 336 | 0 |
"""simple docstring"""
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( stress , tangential_force , area , ):
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif stress < 0:
raise ValueError("""Stress cannot be negative""" )
elif tangential_force < 0:
raise ValueError("""Tangential Force cannot be negative""" )
elif area < 0:
raise ValueError("""Area cannot be negative""" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
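    # Hedged illustrative checks (the function name `shear_stress` is this edit's choice):
    # with two of the three quantities known, the function returns the third by name.
    assert shear_stress(stress=25 , tangential_force=100 , area=0 ) == ("area", 4.0)
    assert shear_stress(stress=0 , tangential_force=1600 , area=200 ) == ("stress", 8.0)
    assert shear_stress(stress=1000 , tangential_force=0 , area=1200 ) == ("tangential_force", 1200000)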
| 371 |
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 336 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_gpt_neo"""] = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_gpt_neo"""] = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
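# Hedged aside (not part of the original file): _LazyModule resolves attributes on first
# access, so importing the package stays cheap until a class is actually touched — the
# same effect as a module-level __getattr__ (PEP 562). A hypothetical minimal sketch:
#
#   def __getattr__(name):
#       import importlib
#       for submodule, public_names in _import_structure.items():
#           if name in public_names:
#               return getattr(importlib.import_module("." + submodule, __name__), name)
#       raise AttributeError(name)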
| 350 |
"""simple docstring"""
def generate_large_matrix():
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid( grid ):
    assert all(row == sorted(row , reverse=True ) for row in grid )
    assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def find_negative_index( array ):
    left = 0
    right = len(array ) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array )
def count_negatives_binary_search( grid ):
    total = 0
    bound = len(grid[0] )
    for i in range(len(grid ) ):
        bound = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(grid ) * len(grid[0] )) - total
def count_negatives_brute_force( grid ):
    return len([number for row in grid for number in row if number < 0] )
def count_negatives_brute_force_with_break( grid ):
    total = 0
    for row in grid:
        for i, number in enumerate(row ):
            if number < 0:
                total += len(row ) - i
                break
    return total
def benchmark():
    from timeit import timeit
    print("""Running benchmarks""" )
    setup = (
        """from __main__ import count_negatives_binary_search, """
        """count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
    )
    for func in (
        "count_negatives_binary_search", # took 0.7727 seconds
        "count_negatives_brute_force_with_break", # took 4.6505 seconds
        "count_negatives_brute_force", # took 12.8160 seconds
    ):
        time = timeit(F"""{func}(grid=grid)""" , setup=setup , number=500 )
        print(F"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark()
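    # Hedged sanity checks against two of the fixed grids above: [[3, 2], [1, 0]]
    # contains no negatives, while in [[7, 7, 6], [-1, -2, -3]] the whole second row is.
    assert count_negatives_binary_search([[3, 2], [1, 0]] ) == 0
    assert count_negatives_binary_search([[7, 7, 6], [-1, -2, -3]] ) == 3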
| 336 | 0 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests( OnnxPipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
    def get_dummy_inputs( self ,seed=0 ):
        generator = np.random.RandomState(seed )
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ ( self ):
_lowercase : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_lowercase : List[Any] = self.get_dummy_inputs()
_lowercase : List[Any] = pipe(**UpperCAmelCase_ ).images
_lowercase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : Tuple = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self ):
_lowercase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" )
_lowercase : Dict = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_lowercase : Union[str, Any] = self.get_dummy_inputs()
_lowercase : Optional[Any] = pipe(**UpperCAmelCase_ ).images
_lowercase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : int = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self ):
_lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" )
_lowercase : Optional[int] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Optional[int] = pipe(**UpperCAmelCase_ ).images
_lowercase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[Any] = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self ):
_lowercase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" )
_lowercase : Dict = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_lowercase : str = self.get_dummy_inputs()
_lowercase : str = pipe(**UpperCAmelCase_ ).images
_lowercase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : str = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self ):
_lowercase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" )
_lowercase : Any = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_lowercase : Optional[Any] = self.get_dummy_inputs()
_lowercase : Union[str, Any] = pipe(**UpperCAmelCase_ ).images
_lowercase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : int = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" )
_lowercase : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_lowercase : List[Any] = self.get_dummy_inputs()
_lowercase : int = pipe(**UpperCAmelCase_ ).images
_lowercase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_lowercase : List[str] = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self ):
_lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_lowercase : str = self.get_dummy_inputs()
_lowercase : Optional[Any] = 3 * [inputs["""prompt"""]]
# forward
_lowercase : str = pipe(**UpperCAmelCase_ )
_lowercase : Optional[int] = output.images[0, -3:, -3:, -1]
_lowercase : List[str] = self.get_dummy_inputs()
_lowercase : List[Any] = 3 * [inputs.pop("""prompt""" )]
_lowercase : Optional[int] = pipe.tokenizer(
UpperCAmelCase_ ,padding="""max_length""" ,max_length=pipe.tokenizer.model_max_length ,truncation=UpperCAmelCase_ ,return_tensors="""np""" ,)
_lowercase : Tuple = text_inputs["""input_ids"""]
_lowercase : int = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
_lowercase : str = prompt_embeds
# forward
_lowercase : List[Any] = pipe(**UpperCAmelCase_ )
_lowercase : Tuple = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
def lowerCamelCase__ ( self ):
_lowercase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_lowercase : Tuple = self.get_dummy_inputs()
_lowercase : List[Any] = 3 * ["""this is a negative prompt"""]
_lowercase : int = negative_prompt
_lowercase : Tuple = 3 * [inputs["""prompt"""]]
# forward
_lowercase : Dict = pipe(**UpperCAmelCase_ )
_lowercase : List[str] = output.images[0, -3:, -3:, -1]
_lowercase : Optional[int] = self.get_dummy_inputs()
_lowercase : Optional[int] = 3 * [inputs.pop("""prompt""" )]
_lowercase : Optional[Any] = []
for p in [prompt, negative_prompt]:
_lowercase : List[Any] = pipe.tokenizer(
UpperCAmelCase_ ,padding="""max_length""" ,max_length=pipe.tokenizer.model_max_length ,truncation=UpperCAmelCase_ ,return_tensors="""np""" ,)
_lowercase : Optional[Any] = text_inputs["""input_ids"""]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
_lowercase : Dict = embeds
# forward
_lowercase : List[Any] = pipe(**UpperCAmelCase_ )
_lowercase : Any = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests( unittest.TestCase ):
"""simple docstring"""
@property
    def gpu_provider( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options( self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def lowerCamelCase__ ( self ):
# using the PNDM scheduler by default
_lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" ,revision="""onnx""" ,safety_checker=UpperCAmelCase_ ,feature_extractor=UpperCAmelCase_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_lowercase : List[Any] = """A painting of a squirrel eating a burger"""
np.random.seed(0 )
_lowercase : List[Any] = sd_pipe([prompt] ,guidance_scale=6.0 ,num_inference_steps=10 ,output_type="""np""" )
_lowercase : Union[str, Any] = output.images
_lowercase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Optional[int] = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase__ ( self ):
_lowercase : Any = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" ,subfolder="""scheduler""" ,revision="""onnx""" )
_lowercase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" ,revision="""onnx""" ,scheduler=UpperCAmelCase_ ,safety_checker=UpperCAmelCase_ ,feature_extractor=UpperCAmelCase_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_lowercase : Dict = """open neural network exchange"""
_lowercase : Optional[Any] = np.random.RandomState(0 )
_lowercase : Union[str, Any] = sd_pipe([prompt] ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=UpperCAmelCase_ ,output_type="""np""" )
_lowercase : Optional[Any] = output.images
_lowercase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Union[str, Any] = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase__ ( self ):
_lowercase : str = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" ,subfolder="""scheduler""" ,revision="""onnx""" )
_lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" ,revision="""onnx""" ,scheduler=UpperCAmelCase_ ,safety_checker=UpperCAmelCase_ ,feature_extractor=UpperCAmelCase_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_lowercase : Dict = """open neural network exchange"""
_lowercase : Dict = np.random.RandomState(0 )
_lowercase : List[Any] = sd_pipe([prompt] ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=UpperCAmelCase_ ,output_type="""np""" )
_lowercase : Optional[Any] = output.images
_lowercase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Dict = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase__ ( self ):
_lowercase : List[str] = 0
def test_callback_fn(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) -> None:
_lowercase : int = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
_lowercase : List[str] = latents[0, -3:, -3:, -1]
_lowercase : Union[str, Any] = np.array(
[-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
_lowercase : str = latents[0, -3:, -3:, -1]
_lowercase : List[Any] = np.array(
[-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
_lowercase : Tuple = False
_lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" ,revision="""onnx""" ,safety_checker=UpperCAmelCase_ ,feature_extractor=UpperCAmelCase_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_lowercase : Optional[int] = """Andromeda galaxy in a bottle"""
_lowercase : Dict = np.random.RandomState(0 )
pipe(
prompt=UpperCAmelCase_ ,num_inference_steps=5 ,guidance_scale=7.5 ,generator=UpperCAmelCase_ ,callback=UpperCAmelCase_ ,callback_steps=1 ,)
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def lowerCamelCase__ ( self ):
_lowercase : Dict = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" ,revision="""onnx""" ,safety_checker=UpperCAmelCase_ ,feature_extractor=UpperCAmelCase_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
assert isinstance(UpperCAmelCase_ ,UpperCAmelCase_ )
assert pipe.safety_checker is None
_lowercase : Optional[int] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase_ )
_lowercase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(UpperCAmelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_lowercase : Any = pipe("""example prompt""" ,num_inference_steps=2 ).images[0]
assert image is not None
| 351 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
UpperCAmelCase: List[str] = True
except (ImportError, ModuleNotFoundError):
UpperCAmelCase: int = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def add_newline_to_end_of_each_sentence( x ):
    x = re.sub("""<n>""" , """""" , x )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x ) )
| 336 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint( ABC ):
    """simple docstring"""
    def __init__( self ):
        # test for the above condition
        self.test()
    def test( self ):
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance ):
                raise Exception(
                    """Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" )
            stepped , completed , reset = self.update(advance )
            counter += 1
            if counter > 1_00_00:
                raise Exception("""update() does not fulfill the constraint.""" )
        if self.remaining() != 0:
            raise Exception("""Custom Constraint is not defined correctly.""" )
    @abstractmethod
    def advance( self ):
        raise NotImplementedError(
            f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
    @abstractmethod
    def does_advance( self ,token_id ):
        raise NotImplementedError(
            f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
    @abstractmethod
    def update( self ,token_id ):
        raise NotImplementedError(
            f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
    @abstractmethod
    def reset( self ):
        raise NotImplementedError(
            f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
    @abstractmethod
    def remaining( self ):
        raise NotImplementedError(
            f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
    @abstractmethod
    def copy( self ,stateful=False ):
        raise NotImplementedError(
            f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class PhrasalConstraint( Constraint ):
    """simple docstring"""
    def __init__( self ,token_ids ):
        super(Constraint ,self ).__init__()
        if not isinstance(token_ids ,list ) or len(token_ids ) == 0:
            raise ValueError(f"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
        if any((not isinstance(token_id ,int ) or token_id < 0) for token_id in token_ids ):
            raise ValueError(f"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids )
        self.fulfilled_idx = -1 # the index of the currently fulfilled step
        self.completed = False
    def advance( self ):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]
    def does_advance( self ,token_id ):
        if not isinstance(token_id ,int ):
            raise ValueError(f"""`token_id` has to be an `int`, but is {token_id} of type {type(token_id )}""" )
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]
    def update( self ,token_id ):
        if not isinstance(token_id ,int ):
            raise ValueError(f"""`token_id` has to be an `int`, but is {token_id} of type {type(token_id )}""" )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id ):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset
    def reset( self ):
        self.completed = False
        self.fulfilled_idx = 0
    def remaining( self ):
        return self.seqlen - (self.fulfilled_idx + 1)
    def copy( self ,stateful=False ):
        new_constraint = PhrasalConstraint(self.token_ids )
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class DisjunctiveTrie:
    """simple docstring"""
    def __init__( self ,nested_token_ids ,no_subsets=True ):
        self.max_height = max([len(one ) for one in nested_token_ids] )
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids ):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root ,nested_token_ids ):
            raise ValueError(
                """Each list in `nested_token_ids` can't be a complete subset of another list, but is"""
                f""" {nested_token_ids}.""" )
        self.trie = root
    def next_tokens( self ,current_seq ):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys() )
        return next_tokens
    def reached_leaf( self ,current_seq ):
        next_tokens = self.next_tokens(current_seq )
        return len(next_tokens ) == 0
    def count_leaves( self ,root ):
        next_nodes = list(root.values() )
        if len(next_nodes ) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn ) for nn in next_nodes] )
    def has_subsets( self ,trie ,nested_token_ids ):
        leaf_count = self.count_leaves(trie )
        return len(nested_token_ids ) != leaf_count
class DisjunctiveConstraint( Constraint ):
    """simple docstring"""
    def __init__( self ,nested_token_ids ):
        super(Constraint ,self ).__init__()
        if not isinstance(nested_token_ids ,list ) or len(nested_token_ids ) == 0:
            raise ValueError(f"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
        if any(not isinstance(token_ids ,list ) for token_ids in nested_token_ids ):
            raise ValueError(f"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
        if any(
            any((not isinstance(token_id ,int ) or token_id < 0) for token_id in token_ids )
            for token_ids in nested_token_ids ):
            raise ValueError(
                f"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
        self.trie = DisjunctiveTrie(nested_token_ids )
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False
    def advance( self ):
        token_list = self.trie.next_tokens(self.current_seq )
        if len(token_list ) == 0:
            return None
        else:
            return token_list
    def does_advance( self ,token_id ):
        if not isinstance(token_id ,int ):
            raise ValueError(f"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id )}""" )
        next_tokens = self.trie.next_tokens(self.current_seq )
        return token_id in next_tokens
    def update( self ,token_id ):
        if not isinstance(token_id ,int ):
            raise ValueError(f"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id )}""" )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id ):
            self.current_seq.append(token_id )
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq )
        self.completed = completed
        return stepped, completed, reset
    def reset( self ):
        self.completed = False
        self.current_seq = []
    def remaining( self ):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq )
    def copy( self ,stateful=False ):
        new_constraint = DisjunctiveConstraint(self.token_ids )
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    """simple docstring"""
    def __init__( self ,constraints ):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints] )
        self.n_constraints = len(constraints )
        self.completed = False
        self.init_state()
    def init_state( self ):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False ) for constraint in self.constraints]
    def get_bank( self ):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints ) * self.max_seqlen) + add
    def advance( self ):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance ,int ):
                    token_list.append(advance )
                elif isinstance(advance ,list ):
                    token_list.extend(advance )
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance ,int ):
                token_list.append(advance )
            elif isinstance(advance ,list ):
                token_list.extend(advance )
        if len(token_list ) == 0:
            return None
        else:
            return token_list
    def reset( self ,token_ids ):
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete , stepped = self.add(token )
                # the entire list of constraints are fulfilled
                if self.completed:
                    break
    def add( self ,token_id ):
        if not isinstance(token_id ,int ):
            raise ValueError(f"""`token_id` should be an `int`, but is `{token_id}`.""" )
        complete , stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
            stepped , complete , reset = self.inprogress_constraint.update(token_id )
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                # But that doesn't mean we self.init_state(), since we only reset the state for this particular
                # constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False ) )
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                # inprogress to None. If there are no pending constraints either, then this full list of constraints
                # is complete.
                self.complete_constraints.append(self.inprogress_constraint )
                self.inprogress_constraint = None
                if len(self.pending_constraints ) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints ):
                if pending_constraint.does_advance(token_id ):
                    stepped , complete , reset = pending_constraint.update(token_id )
                    if not stepped:
                        raise Exception(
                            """`constraint.update(token_id)` is not yielding incremental progress, """
                            """even though `constraint.does_advance(token_id)` is true.""" )
                    if complete:
                        self.complete_constraints.append(pending_constraint )
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped
    def copy( self ,stateful=True ):
        new_state = ConstraintListState(self.constraints ) # we never mutate the self.constraints objects
        # throughout this process, so they remain in their initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True ) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True )
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
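if __name__ == "__main__":
    # Hedged self-test (token ids are arbitrary): stepping through [5, 9, 2] in order
    # fulfils the phrasal constraint; remaining() reaches zero exactly at completion.
    constraint = PhrasalConstraint([5, 9, 2] )
    stepped , completed , reset = constraint.update(5 )
    assert stepped and not completed and constraint.remaining() == 2
    stepped , completed , reset = constraint.update(9 )
    stepped , completed , reset = constraint.update(2 )
    assert completed and constraint.remaining() == 0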
| 352 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(__UpperCAmelCase ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__UpperCAmelCase ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
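# Hedged sanity check (follows from the definition above, not from the original file):
# for the cosine transform the betas increase monotonically over the schedule.
#
#   betas = betas_for_alpha_bar(10)
#   assert betas.shape == (10,)
#   assert bool((betas[1:] >= betas[:-1]).all())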
class KDPM2DiscreteScheduler( SchedulerMixin , ConfigMixin ):
    """simple docstring"""
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__( self ,num_train_timesteps = 10_00 ,beta_start = 0.00085 ,beta_end = 0.012 ,beta_schedule = "linear" ,trained_betas = None ,prediction_type = "epsilon" ,timestep_spacing = "linspace" ,steps_offset = 0 ,):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas ,dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start ,beta_end ,num_train_timesteps ,dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 ,beta_end**0.5 ,num_train_timesteps ,dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps )
        else:
            raise NotImplementedError(f"""{beta_schedule} is not implemented for {self.__class__}""" )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas ,dim=0 )
        # set all values
        self.set_timesteps(num_train_timesteps ,None ,num_train_timesteps )
    def index_for_timestep( self ,timestep ,schedule_timesteps=None ):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            pos = 1 if len(indices ) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
@property
    def init_noise_sigma( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input( self ,sample ,timestep ,):
        step_index = self.index_for_timestep(timestep )
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps( self ,num_inference_steps ,device = None ,num_train_timesteps = None ,):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0 ,num_train_timesteps - 1 ,num_inference_steps ,dtype=np.float32 )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0 ,num_inference_steps ) * step_ratio).round()[::-1].copy().astype(np.float32 )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps ,0 ,-step_ratio )).round().copy().astype(np.float32 )
            timesteps -= 1
        else:
            raise ValueError(
                f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        self.log_sigmas = torch.from_numpy(np.log(sigmas ) ).to(device )
        sigmas = np.interp(timesteps ,np.arange(0 ,len(sigmas ) ) ,sigmas )
        sigmas = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
        sigmas = torch.from_numpy(sigmas ).to(device=device )
        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1 ).log() ,0.5 ).exp()
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
        if str(device ).startswith("""mps""" ):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps ).to(device ,dtype=torch.float32 )
        else:
            timesteps = torch.from_numpy(timesteps ).to(device )
        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol ).to(device ,dtype=timesteps.dtype )
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) ,dim=-1 ).flatten()
        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps] )
        self.sample = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int )
    def sigma_to_t( self ,sigma ):
        # get log sigma
        log_sigma = sigma.log()
        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        low_idx = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
        high_idx = low_idx + 1
        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0 ,1 )
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape )
        return t
@property
    def state_in_first_order( self ):
return self.sample is None
    def step( self ,model_output ,timestep ,sample ,return_dict = True ,):
        step_index = self.index_for_timestep(timestep )
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("""prediction_type not implemented yet: sample""" )
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat
            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            sample = self.sample
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def add_noise( self ,original_samples ,noise ,timesteps ,):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps ):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device ,dtype=torch.float32 )
            timesteps = timesteps.to(original_samples.device ,dtype=torch.float32 )
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device )
            timesteps = timesteps.to(original_samples.device )
        step_indices = [self.index_for_timestep(t ,schedule_timesteps ) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape ) < len(original_samples.shape ):
            sigma = sigma.unsqueeze(-1 )
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
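# Hedged usage sketch (the conventional diffusers scheduler loop, shown for orientation;
# `unet` is a hypothetical noise-prediction model, not defined in this file):
#
#   scheduler.set_timesteps(num_inference_steps=25, device="cpu")
#   sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = unet(model_input, t).sample
#       sample = scheduler.step(noise_pred, t, sample).prev_sample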
| 336 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self ,parent ,batch_size=7 ,num_channels=3 ,image_size=18 ,min_resolution=30 ,max_resolution=4_00 ,do_resize=True ,size=None ,do_center_crop=True ,crop_size=None ,do_normalize=True ,image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] ,):
        size = size if size is not None else {"""shortest_edge""": 18}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = LevitImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = LevitImageProcessingTester(self )
@property
    def image_processor_dict( self ):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing ,"""image_mean""" ) )
        self.assertTrue(hasattr(image_processing ,"""image_std""" ) )
        self.assertTrue(hasattr(image_processing ,"""do_normalize""" ) )
        self.assertTrue(hasattr(image_processing ,"""do_resize""" ) )
        self.assertTrue(hasattr(image_processing ,"""do_center_crop""" ) )
        self.assertTrue(hasattr(image_processing ,"""size""" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{"""shortest_edge""": 18} )
        self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
        self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} )
        self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )
    def test_batch_feature( self ):
pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester ,equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image ,Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        # Test batched
        encoded_images = image_processing(image_inputs ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester ,equal_resolution=False ,numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image ,np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        # Test batched
        encoded_images = image_processing(image_inputs ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester ,equal_resolution=False ,torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image ,torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        # Test batched
        encoded_images = image_processing(image_inputs ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
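# Hedged usage sketch: outside the test harness the processor is typically driven as
#
#   image_processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
#   pixel_values = image_processor(images=pil_image, return_tensors="pt").pixel_values  # (1, 3, 18, 18)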
| 353 |
"""simple docstring"""
import pprint
import requests
API_ENDPOINT_URL = """https://zenquotes.io/api"""
def quote_of_the_day():
    return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def random_quotes():
    return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
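# Hedged note on the payload shape: at the time of writing, zenquotes.io returns a list
# of dicts of the form [{"q": <quote text>, "a": <author>, "h": <html rendering>}].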
| 336 | 0 |
"""simple docstring"""
from ....utils import logging
UpperCAmelCase: Tuple = logging.get_logger(__name__)
class MMBTConfig:
    """simple docstring"""
    def __init__( self ,config ,num_labels=None ,modal_hidden_size=20_48 ):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 354 |
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict( TypedDict ):
    """simple docstring"""
    bwt_string: str
    idx_original_string: int
def all_rotations( s ):
    if not isinstance(s , str ):
        raise TypeError("""The parameter s type must be str.""" )
    return [s[i:] + s[:i] for i in range(len(s ) )]
def bwt_transform( s ):
    if not isinstance(s , str ):
        raise TypeError("""The parameter s type must be str.""" )
    if not s:
        raise ValueError("""The parameter s must not be empty.""" )
    rotations = all_rotations(s )
    rotations.sort() # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(s ),
    }
    return response
def reverse_bwt( bwt_string , idx_original_string ):
    if not isinstance(bwt_string , str ):
        raise TypeError("""The parameter bwt_string type must be str.""" )
    if not bwt_string:
        raise ValueError("""The parameter bwt_string must not be empty.""" )
    try:
        idx_original_string = int(idx_original_string )
    except ValueError:
        raise TypeError(
            """The parameter idx_original_string type must be int or"""
            """ castable to int.""" )
    if idx_original_string < 0:
        raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
    if idx_original_string >= len(bwt_string ):
        raise ValueError(
            """The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )
    ordered_rotations = [""""""] * len(bwt_string )
    for _ in range(len(bwt_string ) ):
        for i in range(len(bwt_string ) ):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    entry_msg = """Provide a string that I will generate its BWT transform: """
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        F'Burrows Wheeler transform for string \'{s}\' results '
        F'in \'{result["bwt_string"]}\''
    )
    original_string = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
    print(
        F'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
        F'we get original string \'{original_string}\''
    )
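# Worked example (hand-checked against the implementation above): sorting the seven
# rotations of "^BANANA" and reading off the last column gives
#   bwt_transform("^BANANA")   ->  {"bwt_string": "BNN^AAA", "idx_original_string": 6}
#   reverse_bwt("BNN^AAA", 6)  ->  "^BANANA"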
| 336 | 0 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        super().setUp()
        vocab_tokens = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    @tooslow
    def test_pre_tokenization( self ):
        tokenizer = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
        texts = """今天天气真好!"""
        jieba_tokens = ["""今天""", """天气""", """真""", """好""", """!"""]
        tokens = tokenizer.tokenize(texts )
        self.assertListEqual(tokens ,jieba_tokens )
        normalized_text = """今天天气真好!"""
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 98_02, 1_49_62, 20_82, 8_31, 2_44]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) ,input_jieba_tokens )
        reconstructed_text = tokenizer.decode(input_jieba_tokens )
        self.assertEqual(reconstructed_text ,normalized_text )
| 355 |
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset():
    arr = [randint(-1000 , 1000 ) for i in range(10 )]
    r = randint(-5000 , 5000 )
    return (arr, r)
dataset = make_dataset()
def triplet_sum1( arr , target ):
    for triplet in permutations(arr , 3 ):
        if sum(triplet ) == target:
            return tuple(sorted(triplet ) )
    return (0, 0, 0)
def triplet_sum2( arr , target ):
    arr.sort()
    n = len(arr )
    for i in range(n - 1 ):
        left , right = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def solution_times():
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code , stmt=test_code1 , repeat=5 , number=10000 )
    times2 = repeat(setup=setup_code , stmt=test_code2 , repeat=5 , number=10000 )
    return (min(times1 ), min(times2 ))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
    print(F'The time for naive implementation is {times[0]}.')
    print(F'The time for optimized implementation is {times[1]}.')
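    # Hedged sanity check: both implementations agree on a fixed case; for target 9
    # over [1, 5, 3, 8] the sorted matching triplet is (1, 3, 5).
    assert triplet_sum1([1, 5, 3, 8] , 9 ) == (1, 3, 5)
    assert triplet_sum2([1, 5, 3, 8] , 9 ) == (1, 3, 5)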
| 336 | 0 |
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
UpperCAmelCase: Optional[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
UpperCAmelCase: Tuple = [0, 25, 50]
UpperCAmelCase: List[Any] = [25, 50, 75]
UpperCAmelCase: Optional[int] = fuzz.membership.trimf(X, abca)
UpperCAmelCase: Any = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
UpperCAmelCase: List[Any] = np.ones(75)
UpperCAmelCase: Any = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
UpperCAmelCase: str = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
UpperCAmelCase: Optional[int] = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
UpperCAmelCase: List[Any] = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
UpperCAmelCase: Optional[int] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
UpperCAmelCase: int = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
UpperCAmelCase: int = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
UpperCAmelCase: List[Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = max[0, (µA(x) - µB(x))]
UpperCAmelCase: int = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# (Further operations such as max-min and max-product composition apply to fuzzy
# relations and are not computed in this script.)
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 356 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ["image_processor", "tokenizer"]
SCREAMING_SNAKE_CASE_ : Optional[int] = "BlipImageProcessor"
SCREAMING_SNAKE_CASE_ : Tuple = "AutoTokenizer"
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
super().__init__(UpperCAmelCase_ ,UpperCAmelCase_ )
# add QFormer tokenizer
_lowercase : Optional[int] = qformer_tokenizer
def __call__( self ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = True ,UpperCAmelCase_ = False ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = 0 ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
if images is None and text is None:
raise ValueError("""You have to specify at least images or text.""" )
_lowercase : List[Any] = BatchFeature()
if text is not None:
_lowercase : List[str] = self.tokenizer(
text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
encoding.update(UpperCAmelCase_ )
_lowercase : Dict = self.qformer_tokenizer(
text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
_lowercase : str = qformer_text_encoding.pop("""input_ids""" )
_lowercase : int = qformer_text_encoding.pop("""attention_mask""" )
if images is not None:
_lowercase : Optional[Any] = self.image_processor(UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ )
encoding.update(UpperCAmelCase_ )
return encoding
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return self.tokenizer.batch_decode(*UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return self.tokenizer.decode(*UpperCAmelCase_ ,**UpperCAmelCase_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.tokenizer.model_input_names
_lowercase : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
if os.path.isfile(UpperCAmelCase_ ):
raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(UpperCAmelCase_ ,exist_ok=UpperCAmelCase_ )
_lowercase : Any = os.path.join(UpperCAmelCase_ ,"""qformer_tokenizer""" )
self.qformer_tokenizer.save_pretrained(UpperCAmelCase_ )
return super().save_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
_lowercase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCAmelCase_ ,subfolder="""qformer_tokenizer""" )
_lowercase : Any = cls._get_arguments_from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
args.append(UpperCAmelCase_ )
return cls(*UpperCAmelCase_ )
| 336 | 0 |
"""simple docstring"""
from __future__ import annotations
UpperCAmelCase = 1.6021e-19 # units = C
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
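# Minimal usage sketch (illustrative numbers; the anonymized function above takes
# conductivity, electron_conc and mobility positionally, and a 0 for exactly one
# of them asks it to solve for that quantity):
#   __SCREAMING_SNAKE_CASE(25, 0, 120) -> ("electron_conc", 25 / (120 * 1.6021e-19))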
if __name__ == "__main__":
import doctest
doctest.testmod()
| 357 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase: Tuple = logging.get_logger(__name__)
UpperCAmelCase: List[Any] = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = "trajectory_transformer"
SCREAMING_SNAKE_CASE_ : Optional[int] = ["past_key_values"]
SCREAMING_SNAKE_CASE_ : Tuple = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self ,UpperCAmelCase_=1_00 ,UpperCAmelCase_=5 ,UpperCAmelCase_=1 ,UpperCAmelCase_=1 ,UpperCAmelCase_=2_49 ,UpperCAmelCase_=6 ,UpperCAmelCase_=17 ,UpperCAmelCase_=25 ,UpperCAmelCase_=4 ,UpperCAmelCase_=4 ,UpperCAmelCase_=1_28 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.0006 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=1 ,UpperCAmelCase_=True ,UpperCAmelCase_=1 ,UpperCAmelCase_=5_02_56 ,UpperCAmelCase_=5_02_56 ,**UpperCAmelCase_ ,):
_lowercase : Dict = vocab_size
_lowercase : List[str] = action_weight
_lowercase : int = reward_weight
_lowercase : List[Any] = value_weight
_lowercase : List[str] = max_position_embeddings
_lowercase : Any = block_size
_lowercase : Any = action_dim
_lowercase : List[str] = observation_dim
_lowercase : Union[str, Any] = transition_dim
_lowercase : str = learning_rate
_lowercase : Tuple = n_layer
_lowercase : Optional[int] = n_head
_lowercase : List[str] = n_embd
_lowercase : List[str] = embd_pdrop
_lowercase : Optional[Any] = attn_pdrop
_lowercase : List[Any] = resid_pdrop
_lowercase : str = initializer_range
_lowercase : Optional[Any] = layer_norm_eps
_lowercase : List[Any] = kaiming_initializer_range
_lowercase : List[Any] = use_cache
super().__init__(pad_token_id=UpperCAmelCase_ ,bos_token_id=UpperCAmelCase_ ,eos_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ )
| 336 | 0 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] ,model_result["""ss"""] ):
_lowercase : Union[str, Any] = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Optional[int] = """sshleifer/tiny-gpt2"""
_lowercase : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=UpperCAmelCase_ ,inference=UpperCAmelCase_ ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=UpperCAmelCase_ ,multi_process=UpperCAmelCase_ ,)
_lowercase : Any = TensorFlowBenchmark(UpperCAmelCase_ )
_lowercase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase__ ( self ):
_lowercase : int = """sgugger/tiny-distilbert-classification"""
_lowercase : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=UpperCAmelCase_ ,inference=UpperCAmelCase_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=UpperCAmelCase_ ,only_pretrain_model=UpperCAmelCase_ ,)
_lowercase : List[Any] = TensorFlowBenchmark(UpperCAmelCase_ )
_lowercase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase__ ( self ):
_lowercase : List[str] = """sshleifer/tiny-gpt2"""
_lowercase : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=UpperCAmelCase_ ,inference=UpperCAmelCase_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=UpperCAmelCase_ ,)
_lowercase : List[str] = TensorFlowBenchmark(UpperCAmelCase_ )
_lowercase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase__ ( self ):
_lowercase : List[Any] = """sshleifer/tiny-gpt2"""
_lowercase : str = AutoConfig.from_pretrained(UpperCAmelCase_ )
_lowercase : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=UpperCAmelCase_ ,inference=UpperCAmelCase_ ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=UpperCAmelCase_ ,multi_process=UpperCAmelCase_ ,)
_lowercase : str = TensorFlowBenchmark(UpperCAmelCase_ ,[config] )
_lowercase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase__ ( self ):
_lowercase : Optional[int] = """sshleifer/tiny-gpt2"""
_lowercase : str = AutoConfig.from_pretrained(UpperCAmelCase_ )
_lowercase : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=UpperCAmelCase_ ,inference=UpperCAmelCase_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=UpperCAmelCase_ ,)
_lowercase : Union[str, Any] = TensorFlowBenchmark(UpperCAmelCase_ ,[config] )
_lowercase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = """sshleifer/tiny-gpt2"""
_lowercase : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=UpperCAmelCase_ ,inference=UpperCAmelCase_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=UpperCAmelCase_ ,)
_lowercase : Optional[int] = TensorFlowBenchmark(UpperCAmelCase_ )
_lowercase : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCamelCase__ ( self ):
_lowercase : Tuple = """sshleifer/tiny-gpt2"""
_lowercase : Tuple = AutoConfig.from_pretrained(UpperCAmelCase_ )
_lowercase : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=UpperCAmelCase_ ,inference=UpperCAmelCase_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=UpperCAmelCase_ ,)
_lowercase : Dict = TensorFlowBenchmark(UpperCAmelCase_ ,[config] )
_lowercase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCamelCase__ ( self ):
_lowercase : Tuple = """patrickvonplaten/t5-tiny-random"""
_lowercase : Tuple = AutoConfig.from_pretrained(UpperCAmelCase_ )
_lowercase : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=UpperCAmelCase_ ,inference=UpperCAmelCase_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=UpperCAmelCase_ ,)
_lowercase : str = TensorFlowBenchmark(UpperCAmelCase_ ,configs=[config] )
_lowercase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 ,"""Cannot do xla on CPU.""" )
def lowerCamelCase__ ( self ):
_lowercase : Any = """sshleifer/tiny-gpt2"""
_lowercase : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,training=UpperCAmelCase_ ,inference=UpperCAmelCase_ ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=UpperCAmelCase_ ,multi_process=UpperCAmelCase_ ,)
_lowercase : List[str] = TensorFlowBenchmark(UpperCAmelCase_ )
_lowercase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase__ ( self ):
_lowercase : Optional[int] = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
_lowercase : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=UpperCAmelCase_ ,save_to_csv=UpperCAmelCase_ ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(UpperCAmelCase_ ,"""inf_time.csv""" ) ,inference_memory_csv_file=os.path.join(UpperCAmelCase_ ,"""inf_mem.csv""" ) ,env_info_csv_file=os.path.join(UpperCAmelCase_ ,"""env.csv""" ) ,multi_process=UpperCAmelCase_ ,)
_lowercase : Tuple = TensorFlowBenchmark(UpperCAmelCase_ )
benchmark.run()
self.assertTrue(Path(os.path.join(UpperCAmelCase_ ,"""inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCAmelCase_ ,"""inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCAmelCase_ ,"""env.csv""" ) ).exists() )
def lowerCamelCase__ ( self ):
_lowercase : Dict = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(UpperCAmelCase_ ):
self.assertTrue(hasattr(UpperCAmelCase_ ,"""sequential""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ ,"""cumulative""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ ,"""current""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ ,"""total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_lowercase : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] ,inference=UpperCAmelCase_ ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(UpperCAmelCase_ ,"""log.txt""" ) ,log_print=UpperCAmelCase_ ,trace_memory_line_by_line=UpperCAmelCase_ ,eager_mode=UpperCAmelCase_ ,multi_process=UpperCAmelCase_ ,)
_lowercase : Optional[Any] = TensorFlowBenchmark(UpperCAmelCase_ )
_lowercase : int = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(UpperCAmelCase_ ,"""log.txt""" ) ).exists() )
| 358 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase: Any = logging.get_logger(__name__)
UpperCAmelCase: List[str] = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = "instructblip_vision_model"
def __init__( self ,UpperCAmelCase_=14_08 ,UpperCAmelCase_=61_44 ,UpperCAmelCase_=39 ,UpperCAmelCase_=16 ,UpperCAmelCase_=2_24 ,UpperCAmelCase_=14 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=1E-6 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=1E-10 ,UpperCAmelCase_=True ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ )
_lowercase : Optional[Any] = hidden_size
_lowercase : Tuple = intermediate_size
_lowercase : List[Any] = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[Any] = patch_size
_lowercase : Optional[Any] = image_size
_lowercase : Union[str, Any] = initializer_range
_lowercase : Optional[Any] = attention_dropout
_lowercase : List[Any] = layer_norm_eps
_lowercase : Optional[int] = hidden_act
_lowercase : Tuple = qkv_bias
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
_lowercase , _lowercase : List[Any] = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
_lowercase : int = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "instructblip_qformer"
def __init__( self ,UpperCAmelCase_=3_05_22 ,UpperCAmelCase_=7_68 ,UpperCAmelCase_=12 ,UpperCAmelCase_=12 ,UpperCAmelCase_=30_72 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=0 ,UpperCAmelCase_="absolute" ,UpperCAmelCase_=2 ,UpperCAmelCase_=14_08 ,**UpperCAmelCase_ ,):
super().__init__(pad_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : List[Any] = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : List[str] = num_attention_heads
_lowercase : Optional[Any] = hidden_act
_lowercase : int = intermediate_size
_lowercase : Union[str, Any] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : List[Any] = max_position_embeddings
_lowercase : Tuple = initializer_range
_lowercase : Optional[int] = layer_norm_eps
_lowercase : Any = position_embedding_type
_lowercase : Dict = cross_attention_frequency
_lowercase : Optional[Any] = encoder_hidden_size
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
_lowercase , _lowercase : Dict = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
_lowercase : str = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = "instructblip"
SCREAMING_SNAKE_CASE_ : List[str] = True
def __init__( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=32 ,**UpperCAmelCase_ ):
super().__init__(**UpperCAmelCase_ )
if vision_config is None:
_lowercase : str = {}
logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" )
if qformer_config is None:
_lowercase : Any = {}
logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" )
if text_config is None:
_lowercase : Optional[int] = {}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
_lowercase : int = InstructBlipVisionConfig(**UpperCAmelCase_ )
_lowercase : Optional[int] = InstructBlipQFormerConfig(**UpperCAmelCase_ )
_lowercase : Dict = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
_lowercase : str = CONFIG_MAPPING[text_model_type](**UpperCAmelCase_ )
_lowercase : str = self.text_config.tie_word_embeddings
_lowercase : Union[str, Any] = self.text_config.is_encoder_decoder
_lowercase : List[str] = num_query_tokens
_lowercase : List[str] = self.vision_config.hidden_size
_lowercase : Dict = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowercase : Union[str, Any] = 1.0
_lowercase : Dict = 0.02
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ,):
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**UpperCAmelCase_ ,)
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = copy.deepcopy(self.__dict__ )
_lowercase : int = self.vision_config.to_dict()
_lowercase : Any = self.qformer_config.to_dict()
_lowercase : Any = self.text_config.to_dict()
_lowercase : Optional[int] = self.__class__.model_type
return output
| 336 | 0 |
"""simple docstring"""
UpperCAmelCase: Optional[Any] = [
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 359 |
"""simple docstring"""
import cva
import numpy as np
class UpperCamelCase :
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
if k in (0.04, 0.06):
_lowercase : Optional[Any] = k
_lowercase : Optional[Any] = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self ):
return str(self.k )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : List[str] = cva.imread(UpperCAmelCase_ ,0 )
_lowercase , _lowercase : Dict = img.shape
_lowercase : list[list[int]] = []
_lowercase : int = img.copy()
_lowercase : List[str] = cva.cvtColor(UpperCAmelCase_ ,cva.COLOR_GRAY2RGB )
_lowercase , _lowercase : Optional[Any] = np.gradient(UpperCAmelCase_ )
_lowercase : Optional[int] = dx**2
_lowercase : Optional[Any] = dy**2
_lowercase : Optional[Any] = dx * dy
        _lowercase : List[str] = self.k  # honor the k validated in __init__ instead of re-hardcoding 0.04
_lowercase : Optional[Any] = self.window_size // 2
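        # For every pixel, the gradient products are summed over the surrounding
        # window to form the structure tensor M = [[wxx, wxy], [wxy, wyy]]; the
        # Harris response is r = det(M) - k * trace(M)**2, and a large positive r
        # marks a corner.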
for y in range(UpperCAmelCase_ ,h - offset ):
for x in range(UpperCAmelCase_ ,w - offset ):
_lowercase : Optional[Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : Dict = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : Union[str, Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : int = (wxx * wyy) - (wxy**2)
_lowercase : Union[str, Any] = wxx + wyy
_lowercase : Union[str, Any] = det - k * (trace**2)
                # Response threshold; lower it to keep more (weaker) corners.
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) ,0 )
color_img.itemset((y, x, 1) ,0 )
color_img.itemset((y, x, 2) ,2_55 )
return color_img, corner_list
if __name__ == "__main__":
UpperCAmelCase: Optional[int] = HarrisCorner(0.04, 3)
UpperCAmelCase , UpperCAmelCase: List[Any] = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
| 336 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
UpperCAmelCase: Optional[Any] = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
UpperCAmelCase: Union[str, Any] = BASE_URL + """/user"""
# https://github.com/settings/tokens
UpperCAmelCase: List[str] = os.environ.get("""USER_TOKEN""", """""")
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : List[Any] = {
"""Authorization""": F"""token {auth_token}""",
"""Accept""": """application/vnd.github.v3+json""",
}
return requests.get(__UpperCAmelCase , headers=__UpperCAmelCase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'{key}: {value}')
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""")
| 360 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase ( snake_case , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = LEDTokenizer
SCREAMING_SNAKE_CASE_ : List[str] = LEDTokenizerFast
SCREAMING_SNAKE_CASE_ : List[str] = True
def lowerCamelCase__ ( self ):
super().setUp()
_lowercase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_lowercase : List[Any] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) )
_lowercase : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowercase : Dict = {"""unk_token""": """<unk>"""}
_lowercase : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
_lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return "lower newer", "lower newer"
@cached_property
def lowerCamelCase__ ( self ):
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def lowerCamelCase__ ( self ):
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_lowercase : Any = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Tuple = tokenizer(UpperCAmelCase_ ,max_length=len(UpperCAmelCase_ ) ,padding=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
_lowercase : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Dict = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIn("""input_ids""" ,UpperCAmelCase_ )
self.assertIn("""attention_mask""" ,UpperCAmelCase_ )
self.assertNotIn("""labels""" ,UpperCAmelCase_ )
self.assertNotIn("""decoder_attention_mask""" ,UpperCAmelCase_ )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Dict = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Tuple = tokenizer(text_target=UpperCAmelCase_ ,max_length=32 ,padding="""max_length""" ,return_tensors="""pt""" )
self.assertEqual(32 ,targets["""input_ids"""].shape[1] )
@require_torch
def lowerCamelCase__ ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : List[Any] = tokenizer(
["""I am a small frog""" * 10_24, """I am a small frog"""] ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual(batch.input_ids.shape ,(2, 51_22) )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : List[Any] = ["""A long paragraph for summarization."""]
_lowercase : Dict = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Dict = tokenizer(UpperCAmelCase_ ,return_tensors="""pt""" )
_lowercase : List[str] = tokenizer(text_target=UpperCAmelCase_ ,return_tensors="""pt""" )
_lowercase : Union[str, Any] = inputs["""input_ids"""]
_lowercase : List[str] = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCamelCase__ ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : str = ["""Summary of the text.""", """Another summary."""]
_lowercase : Optional[int] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_lowercase : Any = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ )
            _lowercase : str = [[0] * len(x ) for x in encoded_output["""input_ids"""]]
_lowercase : Optional[int] = tokenizer.pad(UpperCAmelCase_ )
self.assertSequenceEqual(outputs["""global_attention_mask"""] ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
pass
def lowerCamelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowercase : int = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Optional[int] = self.tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Dict = """A, <mask> AllenNLP sentence."""
_lowercase : List[Any] = tokenizer_r.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ )
_lowercase : Any = tokenizer_p.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) ,sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) ,sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) ,)
_lowercase : str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
_lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 336 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase: Tuple = logging.get_logger(__name__)
UpperCAmelCase: List[Any] = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = "trocr"
SCREAMING_SNAKE_CASE_ : Optional[int] = ["past_key_values"]
SCREAMING_SNAKE_CASE_ : str = {
"num_attention_heads": "decoder_attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "decoder_layers",
}
def __init__( self ,UpperCAmelCase_=5_02_65 ,UpperCAmelCase_=10_24 ,UpperCAmelCase_=12 ,UpperCAmelCase_=16 ,UpperCAmelCase_=40_96 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=2 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=True ,UpperCAmelCase_=False ,UpperCAmelCase_=True ,UpperCAmelCase_=True ,UpperCAmelCase_=1 ,UpperCAmelCase_=0 ,UpperCAmelCase_=2 ,**UpperCAmelCase_ ,):
_lowercase : List[Any] = vocab_size
_lowercase : Optional[Any] = d_model
_lowercase : List[Any] = decoder_layers
_lowercase : Optional[int] = decoder_attention_heads
_lowercase : Dict = decoder_ffn_dim
_lowercase : Any = activation_function
_lowercase : Tuple = max_position_embeddings
_lowercase : int = dropout
_lowercase : Union[str, Any] = attention_dropout
_lowercase : Optional[Any] = activation_dropout
_lowercase : List[Any] = init_std
_lowercase : List[Any] = decoder_layerdrop
_lowercase : str = use_cache
_lowercase : Tuple = scale_embedding
_lowercase : Optional[int] = use_learned_position_embeddings
_lowercase : str = layernorm_embedding
super().__init__(
pad_token_id=UpperCAmelCase_ ,bos_token_id=UpperCAmelCase_ ,eos_token_id=UpperCAmelCase_ ,decoder_start_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
| 361 |
"""simple docstring"""
import argparse
from collections import defaultdict
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : str = F"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : Any = f.readlines()
_lowercase : Optional[int] = F"""class {class_name}("""
_lowercase : List[str] = F"""{4 * " "}def {test_name}("""
_lowercase : List[Any] = F"""{8 * " "}{correct_line.split()[0]}"""
_lowercase : int = F"""{16 * " "}{correct_line.split()[0]}"""
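    # Candidate lines to overwrite start with the same first token as the corrected
    # line at either 8 or 16 spaces of indentation (directly inside the test body,
    # or one block deeper).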
_lowercase : str = False
_lowercase : Optional[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Any = False
_lowercase : int = 0
_lowercase : Tuple = 0
_lowercase : Union[str, Any] = []
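    # Single pass over the file: track whether we are inside the target class, then
    # inside the target test, then on the matching occurrence of the line to fix;
    # only that occurrence is rewritten (keeping its indentation), everything else
    # is copied through unchanged.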
for line in lines:
if line.startswith(__UpperCAmelCase ):
_lowercase : List[str] = True
elif in_class and line.startswith(__UpperCAmelCase ):
_lowercase : str = True
elif in_class and in_func and (line.startswith(__UpperCAmelCase ) or line.startswith(__UpperCAmelCase )):
_lowercase : Union[str, Any] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowercase : Optional[int] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowercase : Optional[Any] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"""{spaces * " "}{correct_line}""" )
_lowercase : Union[str, Any] = False
else:
new_lines.append(__UpperCAmelCase )
with open(__UpperCAmelCase , """w""" ) as f:
for line in new_lines:
f.write(__UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=None ):
if fail is not None:
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : Dict = {l.strip() for l in f.readlines()}
else:
_lowercase : int = None
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : int = f.readlines()
_lowercase : int = defaultdict(__UpperCAmelCase )
for line in correct_lines:
_lowercase , _lowercase , _lowercase , _lowercase : int = line.split(""";""" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase: List[Any] = argparse.ArgumentParser()
parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""")
parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None)
UpperCAmelCase: Any = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 336 | 0 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
UpperCAmelCase: List[Any] = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
def constraint_to_multiple_of(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=0 , __UpperCAmelCase=None ):
_lowercase : Union[str, Any] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
_lowercase : str = math.floor(val / multiple ) * multiple
if x < min_val:
_lowercase : Dict = math.ceil(val / multiple ) * multiple
return x
_lowercase : List[str] = (output_size, output_size) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else output_size
    _lowercase , _lowercase : List[Any] = get_image_size(__UpperCAmelCase )
    _lowercase , _lowercase : Union[str, Any] = output_size
# determine new height and width
_lowercase : str = output_height / input_height
_lowercase : List[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
_lowercase : str = scale_width
else:
# fit height
_lowercase : int = scale_height
_lowercase : List[Any] = constraint_to_multiple_of(scale_height * input_height , multiple=__UpperCAmelCase )
_lowercase : Optional[Any] = constraint_to_multiple_of(scale_width * input_width , multiple=__UpperCAmelCase )
return (new_height, new_width)
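# Illustrative example (hypothetical sizes): for a 480x640 input with
# output_size=384, keep_aspect_ratio=True and multiple=32, the height scale
# 384/480 = 0.8 is closer to 1 than the width scale 384/640 = 0.6, so both sides
# scale by 0.8 and the result is (384, 512) -- already multiples of 32.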
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["pixel_values"]
def __init__( self ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = PILImageResampling.BILINEAR ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = 1 / 2_55 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ )
_lowercase : List[Any] = size if size is not None else {"""height""": 3_84, """width""": 3_84}
_lowercase : str = get_size_dict(UpperCAmelCase_ )
_lowercase : Tuple = do_resize
_lowercase : Any = size
_lowercase : List[Any] = keep_aspect_ratio
_lowercase : Any = ensure_multiple_of
_lowercase : str = resample
_lowercase : Optional[Any] = do_rescale
_lowercase : List[Any] = rescale_factor
_lowercase : Union[str, Any] = do_normalize
_lowercase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowercase : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = PILImageResampling.BICUBIC ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
_lowercase : Optional[Any] = get_size_dict(UpperCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_lowercase : Dict = get_resize_output_image_size(
UpperCAmelCase_ ,output_size=(size["""height"""], size["""width"""]) ,keep_aspect_ratio=UpperCAmelCase_ ,multiple=UpperCAmelCase_ ,)
return resize(UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
return rescale(UpperCAmelCase_ ,scale=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
return normalize(UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = ChannelDimension.FIRST ,**UpperCAmelCase_ ,):
_lowercase : Any = do_resize if do_resize is not None else self.do_resize
_lowercase : List[str] = size if size is not None else self.size
_lowercase : int = get_size_dict(UpperCAmelCase_ )
_lowercase : Any = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_lowercase : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_lowercase : List[str] = resample if resample is not None else self.resample
_lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
_lowercase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase : str = do_normalize if do_normalize is not None else self.do_normalize
_lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean
_lowercase : int = image_std if image_std is not None else self.image_std
_lowercase : Union[str, Any] = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_lowercase : int = [to_numpy_array(UpperCAmelCase_ ) for image in images]
if do_resize:
_lowercase : Union[str, Any] = [self.resize(image=UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ) for image in images]
if do_rescale:
_lowercase : int = [self.rescale(image=UpperCAmelCase_ ,scale=UpperCAmelCase_ ) for image in images]
if do_normalize:
_lowercase : str = [self.normalize(image=UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ) for image in images]
_lowercase : Tuple = [to_channel_dimension_format(UpperCAmelCase_ ,UpperCAmelCase_ ) for image in images]
_lowercase : int = {"""pixel_values""": images}
return BatchFeature(data=UpperCAmelCase_ ,tensor_type=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ):
_lowercase : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(UpperCAmelCase_ ):
_lowercase : Tuple = target_sizes.numpy()
_lowercase : Optional[Any] = []
for idx in range(len(UpperCAmelCase_ ) ):
_lowercase : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode="""bilinear""" ,align_corners=UpperCAmelCase_ )
_lowercase : Optional[int] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCAmelCase_ )
else:
_lowercase : Union[str, Any] = logits.argmax(dim=1 )
_lowercase : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 362 |
"""simple docstring"""
UpperCAmelCase: List[str] = """0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 336 | 0 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(__UpperCAmelCase ) )
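# A color is valid for a vertex when none of its already-colored neighbours
# (entries equal to 1 in its adjacency row) carries the same color.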
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
# Base Case
if index == len(__UpperCAmelCase ):
return True
# Recursive Step
for i in range(__UpperCAmelCase ):
if valid_coloring(graph[index] , __UpperCAmelCase , __UpperCAmelCase ):
# Color current vertex
_lowercase : Dict = i
# Validate coloring
if util_color(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , index + 1 ):
return True
# Backtrack
_lowercase : Dict = -1
return False
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : Union[str, Any] = [-1] * len(__UpperCAmelCase )
if util_color(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , 0 ):
return colored_vertices
return []
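# Minimal usage sketch (hypothetical 4-cycle graph; the entry point above takes
# (graph, max_colors) and returns a vertex -> color list, or [] when no valid
# coloring exists):
#   graph = [[0, 1, 0, 1],
#            [1, 0, 1, 0],
#            [0, 1, 0, 1],
#            [1, 0, 1, 0]]
#   coloring this 4-cycle with 2 colors yields e.g. [0, 1, 0, 1]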
| 363 |
"""simple docstring"""
UpperCAmelCase: str = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
UpperCAmelCase: Any = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCAmelCase: int = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 336 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class UpperCamelCase ( snake_case ):
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : str = params
_lowercase : Optional[int] = np.array(UpperCAmelCase_ )
_lowercase : Any = np.array([len(UpperCAmelCase_ ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self ,UpperCAmelCase_ ):
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
return len(self.lengths )
def lowerCamelCase__ ( self ):
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.params.max_model_input_size
_lowercase : Any = self.lengths > max_len
logger.info(f"""Splitting {sum(UpperCAmelCase_ )} too long sequences.""" )
def divide_chunks(UpperCAmelCase_ ,UpperCAmelCase_ ):
return [l[i : i + n] for i in range(0 ,len(UpperCAmelCase_ ) ,UpperCAmelCase_ )]
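        # Each over-long sequence is cut below into chunks of at most max_len - 2
        # tokens, and the start/end special tokens are re-attached to every chunk so
        # each chunk stays a well-formed model input.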
_lowercase : str = []
_lowercase : Tuple = []
if self.params.mlm:
            _lowercase , _lowercase : List[str] = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
        else:
            _lowercase , _lowercase : Optional[int] = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids ,self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
_lowercase : Tuple = []
for sub_s in divide_chunks(seq_ ,max_len - 2 ):
if sub_s[0] != cls_id:
_lowercase : Optional[Any] = np.insert(UpperCAmelCase_ ,0 ,UpperCAmelCase_ )
if sub_s[-1] != sep_id:
_lowercase : Any = np.insert(UpperCAmelCase_ ,len(UpperCAmelCase_ ) ,UpperCAmelCase_ )
assert len(UpperCAmelCase_ ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(UpperCAmelCase_ )
new_tok_ids.extend(UpperCAmelCase_ )
new_lengths.extend([len(UpperCAmelCase_ ) for l in sub_seqs] )
_lowercase : Union[str, Any] = np.array(UpperCAmelCase_ )
_lowercase : Any = np.array(UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : int = len(self )
_lowercase : Union[str, Any] = self.lengths > 11
_lowercase : Optional[int] = self.token_ids[indices]
_lowercase : Any = self.lengths[indices]
_lowercase : Any = len(self )
logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
def lowerCamelCase__ ( self ):
if "unk_token" not in self.params.special_tok_ids:
return
else:
_lowercase : Tuple = self.params.special_tok_ids["""unk_token"""]
_lowercase : Optional[Any] = len(self )
_lowercase : List[Any] = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
_lowercase : int = (unk_occs / self.lengths) < 0.5
_lowercase : List[str] = self.token_ids[indices]
_lowercase : List[str] = self.lengths[indices]
_lowercase : List[str] = len(self )
logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
def lowerCamelCase__ ( self ):
if not self.params.is_master:
return
logger.info(f"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : Optional[int] = [t[0] for t in batch]
_lowercase : Optional[int] = [t[1] for t in batch]
assert len(UpperCAmelCase_ ) == len(UpperCAmelCase_ )
# Max for paddings
_lowercase : List[Any] = max(UpperCAmelCase_ )
# Pad token ids
if self.params.mlm:
_lowercase : int = self.params.special_tok_ids["""pad_token"""]
else:
_lowercase : Any = self.params.special_tok_ids["""unk_token"""]
_lowercase : List[Any] = [list(t.astype(UpperCAmelCase_ ) ) + [pad_idx] * (max_seq_len_ - len(UpperCAmelCase_ )) for t in token_ids]
assert len(tk_ ) == len(UpperCAmelCase_ )
assert all(len(UpperCAmelCase_ ) == max_seq_len_ for t in tk_ )
_lowercase : Any = torch.tensor(tk_ ) # (bs, max_seq_len_)
_lowercase : Any = torch.tensor(UpperCAmelCase_ ) # (bs)
return tk_t, lg_t
| 364 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
UpperCAmelCase: List[Any] = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
def constraint_to_multiple_of(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=0 , __UpperCAmelCase=None ):
_lowercase : Union[str, Any] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
_lowercase : str = math.floor(val / multiple ) * multiple
if x < min_val:
_lowercase : Dict = math.ceil(val / multiple ) * multiple
return x
_lowercase : List[str] = (output_size, output_size) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else output_size
_lowercase , _lowercase : List[Any] = get_image_size(__UpperCAmelCase )
_lowercase , _lowercase : Union[str, Any] = output_size
# determine new height and width
_lowercase : str = output_height / input_height
_lowercase : List[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
_lowercase : str = scale_width
else:
# fit height
_lowercase : int = scale_height
_lowercase : List[Any] = constraint_to_multiple_of(scale_height * input_height , multiple=__UpperCAmelCase )
_lowercase : Optional[Any] = constraint_to_multiple_of(scale_width * input_width , multiple=__UpperCAmelCase )
return (new_height, new_width)
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["pixel_values"]
def __init__( self ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = PILImageResampling.BILINEAR ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = 1 / 2_55 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ )
_lowercase : List[Any] = size if size is not None else {"""height""": 3_84, """width""": 3_84}
_lowercase : str = get_size_dict(UpperCAmelCase_ )
_lowercase : Tuple = do_resize
_lowercase : Any = size
_lowercase : List[Any] = keep_aspect_ratio
_lowercase : Any = ensure_multiple_of
_lowercase : str = resample
_lowercase : Optional[Any] = do_rescale
_lowercase : List[Any] = rescale_factor
_lowercase : Union[str, Any] = do_normalize
_lowercase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowercase : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = PILImageResampling.BICUBIC ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
_lowercase : Optional[Any] = get_size_dict(UpperCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_lowercase : Dict = get_resize_output_image_size(
UpperCAmelCase_ ,output_size=(size["""height"""], size["""width"""]) ,keep_aspect_ratio=UpperCAmelCase_ ,multiple=UpperCAmelCase_ ,)
return resize(UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
return rescale(UpperCAmelCase_ ,scale=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
return normalize(UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
    def preprocess(
        self,
        images,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        keep_aspect_ratio: Optional[bool] = None,
        ensure_multiple_of: Optional[int] = None,
        resample: Optional[PILImageResampling] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        # parentheses added: the original `do_resize and size is None or resample is None`
        # triggered on any call with resample unset, even when do_resize was False
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [
                self.resize(
                    image=image,
                    size=size,
                    keep_aspect_ratio=keep_aspect_ratio,
                    ensure_multiple_of=ensure_multiple_of,
                    resample=resample,
                )
                for image in images
            ]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
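
# Usage sketch (illustrative, not part of the original module): the resize-to-multiple
# logic above can be exercised with a plain numpy array, no checkpoint needed. For a
# 480x640 input with output_size=(384, 384), keep_aspect_ratio=True and multiple=32,
# "fit height" scaling gives (384, 512), both multiples of 32:
#
#     dummy = np.zeros((3, 480, 640), dtype=np.uint8)  # channels-first dummy image
#     get_resize_output_image_size(dummy, output_size=(384, 384), keep_aspect_ratio=True, multiple=32)
#     # -> (384, 512)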
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # Deprecation shim: inheriting from the re-exported mixin keeps old imports
    # working while warning once at import time.
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement(A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition
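    # Editor cross-check (assumes both sets share the same universe X, as above): on a
    # common universe, fuzzy OR/AND reduce to elementwise max/min, so numpy verifies them.
    assert np.allclose(union, np.maximum(young, middle_aged))
    assert np.allclose(intersection, np.minimum(young, middle_aged))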
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    """Return True if `number` is prime, using 6k ± 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10001) -> int:
    """Return the `nth` prime number (Project Euler problem 7)."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(F'{solution() = }')
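    # Editor sanity checks using only the functions above: 2 is the 1st prime, 13 the 6th.
    assert solution(1) == 2
    assert solution(6) == 13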
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images from random channels-first numpy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
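
# Editor note: with `_LazyModule`, importing this package is cheap. The torch-backed
# classes listed in `_import_structure` are only resolved on first attribute access,
# e.g. `from transformers import InformerModel` triggers the real import at that point
# (assuming torch is installed; otherwise the OptionalDependencyNotAvailable path applies).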
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for datasets built from a Spark DataFrame."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )
    def _validate_cache_dir(self):
        # Define this so that we don't reference self in create_cache_and_write_probe, which will result in a
        # pickling error due to pickling the SparkContext.
        cache_dir = self._cache_dir

        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(cache_dir, exist_ok=True)
            probe_file = os.path.join(cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will
            # not change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ) -> Iterable[Tuple[int, Tuple[int, int, int, List[int]]]]:
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(UpperCAmelCase_ )
        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
# should rename everything at the end
logger.debug(f"""Renaming {total_shards} shards.""" )
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result
            # in a pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )
    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
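
# Usage sketch (assumptions flagged): this builder backs `datasets.Dataset.from_spark`,
# the public entry point in recent `datasets` releases, and needs a running SparkSession.
#
#     from pyspark.sql import SparkSession
#     from datasets import Dataset
#
#     spark = SparkSession.builder.master("local[*]").getOrCreate()
#     df = spark.createDataFrame([("hello",), ("world",)], schema="text: string")
#     ds = Dataset.from_spark(df)  # materializes the DataFrame through this builder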
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def step_model(model, input, target, accelerator, do_backward=False):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
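
# Minimal sketch of the pattern exercised above (hypothetical names; assumes `model`,
# `optimizer` and `dataloader` were returned by `accelerator.prepare(...)` with
# gradient_accumulation_steps=2): `accelerator.accumulate(model)` suppresses the
# gradient sync on accumulation steps and makes the prepared optimizer's `step()` a
# no-op until a sync step.
#
#     for batch in dataloader:
#         with accelerator.accumulate(model):
#             output = model(batch["x"])
#             loss = torch.nn.functional.mse_loss(output, batch["y"])
#             accelerator.backward(loss)
#             optimizer.step()
#             optimizer.zero_grad()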
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""▁he""", """ll""", """o"""] )
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] ,)
    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_lowercase : Union[str, Any] = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1]]} # noqa: E501
# fmt: on
        expected_encoding = _lowercase  # alias for the literal dict defined above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="xlnet-base-cased",
            revision="c841166438c31ec7ca9a106dee7bb312b73ae511",
        )
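
# Editor note: the `[4, 3]` suffixes asserted in `test_sequence_builders` are XLNet's
# sep_token_id and cls_token_id. Unlike BERT, XLNet appends <sep> and <cls> at the END
# of the sequence, which is why both single sentences and pairs end with `[4, 3]`.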
"""simple docstring"""
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
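
# Example (hypothetical field name): a dataclass field whose default is a fresh list
# per instance, since mutable defaults cannot be shared. Used for `chars_to_ignore` below.
#
#     chars: List[str] = list_field(default=[",", "."], metadata={"help": "..."})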
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    """

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels
        return batch
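
# Editor note: -100 is the ignore index used by the model's CTC loss, so the
# `masked_fill` above excludes padded label positions from the loss. Concretely,
# `labels_batch.attention_mask` is 0 exactly where the label sequence was padded,
# and those positions all become -100.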
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , __UpperCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
_lowercase : List[Any] = datasets.load_dataset(
"""common_voice""" , data_args.dataset_config_name , split=data_args.train_split_name )
_lowercase : Optional[Any] = datasets.load_dataset("""common_voice""" , data_args.dataset_config_name , split="""test""" )
# Create and save tokenizer
_lowercase : Any = F"""[{"".join(data_args.chars_to_ignore )}]"""
def remove_special_characters(__UpperCAmelCase ):
_lowercase : Optional[Any] = re.sub(__UpperCAmelCase , """""" , batch["""sentence"""] ).lower() + """ """
return batch
_lowercase : Optional[Any] = train_dataset.map(__UpperCAmelCase , remove_columns=["""sentence"""] )
_lowercase : Optional[Any] = eval_dataset.map(__UpperCAmelCase , remove_columns=["""sentence"""] )
def extract_all_chars(__UpperCAmelCase ):
_lowercase : str = """ """.join(batch["""text"""] )
_lowercase : Any = list(set(__UpperCAmelCase ) )
return {"vocab": [vocab], "all_text": [all_text]}
_lowercase : Dict = train_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , batch_size=-1 , keep_in_memory=__UpperCAmelCase , remove_columns=train_dataset.column_names , )
_lowercase : Tuple = train_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , batch_size=-1 , keep_in_memory=__UpperCAmelCase , remove_columns=eval_dataset.column_names , )
_lowercase : Optional[int] = list(set(vocab_train["""vocab"""][0] ) | set(vocab_test["""vocab"""][0] ) )
_lowercase : Dict = {v: k for k, v in enumerate(__UpperCAmelCase )}
_lowercase : int = vocab_dict[""" """]
del vocab_dict[" "]
_lowercase : Union[str, Any] = len(__UpperCAmelCase )
_lowercase : Optional[int] = len(__UpperCAmelCase )
with open("""vocab.json""" , """w""" ) as vocab_file:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowercase : List[str] = WavaVecaCTCTokenizer(
"""vocab.json""" , unk_token="""[UNK]""" , pad_token="""[PAD]""" , word_delimiter_token="""|""" , )
_lowercase : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0.0 , do_normalize=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase )
_lowercase : Union[str, Any] = WavaVecaProcessor(feature_extractor=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
_lowercase : List[str] = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="""mean""" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
_lowercase : Dict = min(len(__UpperCAmelCase ) , data_args.max_train_samples )
_lowercase : List[Any] = train_dataset.select(range(__UpperCAmelCase ) )
if data_args.max_val_samples is not None:
_lowercase : str = eval_dataset.select(range(data_args.max_val_samples ) )
_lowercase : Union[str, Any] = torchaudio.transforms.Resample(48000 , 16000 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(__UpperCAmelCase ):
_lowercase : int = torchaudio.load(batch["""path"""] )
_lowercase : Any = resampler(__UpperCAmelCase ).squeeze().numpy()
_lowercase : Tuple = 16000
_lowercase : Any = batch["""text"""]
return batch
_lowercase : str = train_dataset.map(
__UpperCAmelCase , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
_lowercase : int = eval_dataset.map(
__UpperCAmelCase , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(__UpperCAmelCase ):
# check that all files have the correct sampling rate
assert (
len(set(batch["""sampling_rate"""] ) ) == 1
), F"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
_lowercase : Union[str, Any] = processor(
audio=batch["""speech"""] , text=batch["""target_text"""] , sampling_rate=batch["""sampling_rate"""][0] )
batch.update(__UpperCAmelCase )
return batch
_lowercase : Union[str, Any] = train_dataset.map(
__UpperCAmelCase , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , )
_lowercase : Dict = eval_dataset.map(
__UpperCAmelCase , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , )
# Metric
_lowercase : Dict = datasets.load_metric("""wer""" )
def compute_metrics(__UpperCAmelCase ):
_lowercase : int = pred.predictions
_lowercase : str = np.argmax(__UpperCAmelCase , axis=-1 )
_lowercase : Optional[Any] = processor.tokenizer.pad_token_id
_lowercase : Tuple = processor.batch_decode(__UpperCAmelCase )
# we do not want to group tokens when computing the metrics
_lowercase : Optional[int] = processor.batch_decode(pred.label_ids , group_tokens=__UpperCAmelCase )
_lowercase : str = wer_metric.compute(predictions=__UpperCAmelCase , references=__UpperCAmelCase )
return {"wer": wer}
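    # Worked example (strings assumed, not from the source): WER counts word-level
    # edits, so a prediction "hello word" against the reference "hello world" is one
    # substitution over two reference words, i.e. WER = 0.5.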
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
_lowercase : Any = DataCollatorCTCWithPadding(processor=__UpperCAmelCase , padding=__UpperCAmelCase )
# Initialize our Trainer
_lowercase : Dict = CTCTrainer(
model=__UpperCAmelCase , data_collator=__UpperCAmelCase , args=__UpperCAmelCase , compute_metrics=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_lowercase : int = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
_lowercase : List[str] = model_args.model_name_or_path
else:
_lowercase : Dict = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
_lowercase : List[Any] = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
trainer.save_model()
_lowercase : int = train_result.metrics
_lowercase : Optional[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase )
)
_lowercase : int = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics("""train""" , __UpperCAmelCase )
trainer.save_metrics("""train""" , __UpperCAmelCase )
trainer.save_state()
# Evaluation
_lowercase : List[Any] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_lowercase : Tuple = trainer.evaluate()
_lowercase : Optional[Any] = data_args.max_val_samples if data_args.max_val_samples is not None else len(__UpperCAmelCase )
_lowercase : Dict = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics("""eval""" , __UpperCAmelCase )
trainer.save_metrics("""eval""" , __UpperCAmelCase )
return results
if __name__ == "__main__":
main()
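# Hedged invocation sketch (script name and argument values are assumptions; the flag
# names come from the ModelArguments/DataTrainingArguments/TrainingArguments parsers above):
# python run_common_voice.py --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#     --dataset_config_name tr --output_dir ./wav2vec2-xlsr-demo --do_train --do_eval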
| 369 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
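# Minimal usage check (the example string is an assumption, not from the source):
# reversing the word order twice restores the original input.
assert __SCREAMING_SNAKE_CASE("""hello world""" ) == """world hello"""
assert __SCREAMING_SNAKE_CASE(__SCREAMING_SNAKE_CASE("""hello world""" ) ) == """hello world"""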
| 336 | 0 |
"""simple docstring"""
from math import factorial, radians
def __SCREAMING_SNAKE_CASE ( angle_in_degrees , accuracy = 18 , rounded_values_count = 10 ):
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees )
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy ):
        result += (b * (angle_in_radians**a)) / factorial(a )
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result , rounded_values_count )
if __name__ == "__main__":
__import__("""doctest""").testmod()
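# Worked check (the input value is an assumption): 30 degrees is pi/6 radians, and the
# truncated Maclaurin series converges to sin(pi/6) = 0.5 well within the default
# rounding precision.
assert __SCREAMING_SNAKE_CASE(30 ) == 0.5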
| 370 |
"""simple docstring"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines ):
    filtered_lines = []
    for line in lines:
        line = re.sub(R"""#.*""" , """""" , line )  # remove comments
        if line:
            filtered_lines.append(line )
    full_str = """\n""".join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode("""utf-8""" )
    return sha256(full_bytes ).hexdigest()
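# Illustration of the caching behavior above (inputs assumed): comment-only and blank
# lines are dropped before hashing, so purely cosmetic comment lines do not change the
# cache key:
# _hash_python_lines(["x = 1", "# a comment", ""]) == _hash_python_lines(["x = 1"])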
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 336 | 0 |
"""simple docstring"""
from manim import *
class UpperCamelCase ( Scene ):
    """simple docstring"""
    def construct( self ):
_lowercase : int = Rectangle(height=0.5 ,width=0.5 )
_lowercase : Union[str, Any] = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
_lowercase : Dict = [mem.copy() for i in range(6 )]
_lowercase : Any = [mem.copy() for i in range(6 )]
_lowercase : str = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ ,buff=0 )
_lowercase : Optional[Any] = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ ,buff=0 )
_lowercase : Tuple = VGroup(UpperCAmelCase_ ,UpperCAmelCase_ ).arrange(UpperCAmelCase_ ,buff=0 )
_lowercase : Optional[int] = Text("""CPU""" ,font_size=24 )
_lowercase : Dict = Group(UpperCAmelCase_ ,UpperCAmelCase_ ).arrange(UpperCAmelCase_ ,buff=0.5 ,aligned_edge=UpperCAmelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase_ )
_lowercase : Tuple = [mem.copy() for i in range(1 )]
_lowercase : List[Any] = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ ,buff=0 )
_lowercase : Any = Text("""GPU""" ,font_size=24 )
_lowercase : Any = Group(UpperCAmelCase_ ,UpperCAmelCase_ ).arrange(UpperCAmelCase_ ,buff=0.5 ,aligned_edge=UpperCAmelCase_ )
gpu.align_to(UpperCAmelCase_ ,UpperCAmelCase_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(UpperCAmelCase_ )
_lowercase : List[Any] = [mem.copy() for i in range(6 )]
_lowercase : Tuple = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ ,buff=0 )
_lowercase : Tuple = Text("""Model""" ,font_size=24 )
_lowercase : Optional[int] = Group(UpperCAmelCase_ ,UpperCAmelCase_ ).arrange(UpperCAmelCase_ ,buff=0.5 ,aligned_edge=UpperCAmelCase_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(UpperCAmelCase_ ,run_time=1 ) ,Create(UpperCAmelCase_ ,run_time=1 ) ,Create(UpperCAmelCase_ ,run_time=1 ) ,)
_lowercase : int = MarkupText(
f"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" ,font_size=24 ,)
_lowercase : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowercase : str = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ ,run_time=2.5 ) ,Write(UpperCAmelCase_ ) ,Write(UpperCAmelCase_ ) )
self.add(UpperCAmelCase_ )
_lowercase : Union[str, Any] = []
_lowercase : Optional[Any] = []
_lowercase : Optional[Any] = []
for i, rect in enumerate(UpperCAmelCase_ ):
_lowercase : Any = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase_ ,opacity=0.7 )
cpu_target.move_to(UpperCAmelCase_ )
cpu_target.generate_target()
_lowercase : List[Any] = 0.46 / 4
_lowercase : Optional[int] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=UpperCAmelCase_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target ,direction=UpperCAmelCase_ ,buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=UpperCAmelCase_ ,buff=0.0 )
cpu_targs.append(UpperCAmelCase_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(UpperCAmelCase_ ) )
second_animations.append(MoveToTarget(UpperCAmelCase_ ,run_time=1.5 ) )
self.play(*UpperCAmelCase_ )
self.play(*UpperCAmelCase_ )
self.wait()
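# Hedged render sketch (the file name is an assumption; the CLI flags follow the
# upstream manim docs):
# manim -pql this_scene.py UpperCamelCase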
| 371 |
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 336 | 0 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCamelCase ( ProcessorMixin ):
"""simple docstring"""
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
super().__init__(UpperCAmelCase_ ,UpperCAmelCase_ )
_lowercase : Optional[Any] = self.feature_extractor
_lowercase : List[str] = False
def __call__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*UpperCAmelCase_ ,**UpperCAmelCase_ )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
_lowercase : List[Any] = kwargs.pop("""raw_speech""" )
else:
_lowercase : List[str] = kwargs.pop("""audio""" ,UpperCAmelCase_ )
_lowercase : List[Any] = kwargs.pop("""sampling_rate""" ,UpperCAmelCase_ )
_lowercase : Any = kwargs.pop("""text""" ,UpperCAmelCase_ )
if len(UpperCAmelCase_ ) > 0:
_lowercase : str = args[0]
_lowercase : Optional[int] = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
_lowercase : int = self.feature_extractor(UpperCAmelCase_ ,*UpperCAmelCase_ ,sampling_rate=UpperCAmelCase_ ,**UpperCAmelCase_ )
if text is not None:
_lowercase : List[Any] = self.tokenizer(UpperCAmelCase_ ,**UpperCAmelCase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_lowercase : Dict = encodings["""input_ids"""]
return inputs
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return self.tokenizer.batch_decode(*UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return self.tokenizer.decode(*UpperCAmelCase_ ,**UpperCAmelCase_ )
@contextmanager
def lowerCamelCase__ ( self ):
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
_lowercase : Union[str, Any] = True
_lowercase : int = self.tokenizer
yield
_lowercase : Optional[Any] = self.feature_extractor
_lowercase : Union[str, Any] = False
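    # Hedged usage sketch (identifiers assumed; upstream this processor is exposed as
    # Speech2TextProcessor):
    # processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
    # inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")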
| 350 |
"""simple docstring"""
def generate_large_matrix():
    return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid ):
    assert all(row == sorted(row , reverse=True ) for row in grid )
    assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def find_negative_index(array ):
    left = 0
    right = len(array ) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array )
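# Worked example (row values assumed): in the descending row [4, 3, 1, -1, -3] the
# first negative value sits at index 3.
assert find_negative_index([4, 3, 1, -1, -3] ) == 3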
def count_negatives_binary_search(grid ):
    total = 0
    bound = len(grid[0] )
    for i in range(len(grid ) ):
        bound = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(grid ) * len(grid[0] )) - total
def count_negatives_brute_force(grid ):
    return len([number for row in grid for number in row if number < 0] )
def count_negatives_brute_force_with_break(grid ):
    total = 0
    for row in grid:
        for i, number in enumerate(row ):
            if number < 0:
                total += len(row ) - i
                break
    return total
def benchmark():
    from timeit import timeit
    print("""Running benchmarks""" )
    setup = (
        """from __main__ import count_negatives_binary_search, """
        """count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(F"""{func}(grid=grid)""" , setup=setup , number=500 )
        print(F"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 336 | 0 |
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class UpperCamelCase :
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_=0.2 ,UpperCAmelCase_=0.2 ):
_lowercase : int = bp_numa
_lowercase : str = bp_numa
_lowercase : List[Any] = bp_numa
_lowercase : List[str] = conva_get[:2]
_lowercase : int = conva_get[2]
_lowercase : int = size_pa
_lowercase : List[Any] = rate_w
_lowercase : str = rate_t
_lowercase : List[str] = [
np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
_lowercase : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
_lowercase : Optional[Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
_lowercase : Tuple = -2 * np.random.rand(self.conva[1] ) + 1
_lowercase : List[str] = -2 * np.random.rand(self.num_bpa ) + 1
_lowercase : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
# save model dict with pickle
_lowercase : Any = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(UpperCAmelCase_ ,"""wb""" ) as f:
pickle.dump(UpperCAmelCase_ ,UpperCAmelCase_ )
print(f"""Model saved: {save_path}""" )
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ):
# read saved model
with open(UpperCAmelCase_ ,"""rb""" ) as f:
_lowercase : Any = pickle.load(UpperCAmelCase_ ) # noqa: S301
_lowercase : int = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
_lowercase : int = model_dic.get("""size_pooling1""" )
_lowercase : Tuple = model_dic.get("""num_bp1""" )
_lowercase : Optional[int] = model_dic.get("""num_bp2""" )
_lowercase : Union[str, Any] = model_dic.get("""num_bp3""" )
_lowercase : Optional[int] = model_dic.get("""rate_weight""" )
_lowercase : List[str] = model_dic.get("""rate_thre""" )
# create model instance
_lowercase : Optional[int] = CNN(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
# modify model parameter
_lowercase : Any = model_dic.get("""w_conv1""" )
_lowercase : str = model_dic.get("""wkj""" )
_lowercase : List[str] = model_dic.get("""vji""" )
_lowercase : Optional[int] = model_dic.get("""thre_conv1""" )
_lowercase : List[str] = model_dic.get("""thre_bp2""" )
_lowercase : List[Any] = model_dic.get("""thre_bp3""" )
return conv_ins
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return 1 / (1 + np.exp(-1 * x ))
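        # Worked check (value illustrative): sig(0) = 1 / (1 + exp(0)) = 0.5, and the
        # output saturates toward 1 for large positive x.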
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return round(UpperCAmelCase_ ,3 )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
# convolution process
_lowercase : Any = convs[0]
_lowercase : Union[str, Any] = convs[1]
_lowercase : Optional[int] = np.shape(UpperCAmelCase_ )[0]
# get the data slice of original image data, data_focus
_lowercase : List[Any] = []
for i_focus in range(0 ,size_data - size_conv + 1 ,UpperCAmelCase_ ):
for j_focus in range(0 ,size_data - size_conv + 1 ,UpperCAmelCase_ ):
_lowercase : Optional[int] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(UpperCAmelCase_ )
# calculate the feature map of every single kernel, and saved as list of matrix
_lowercase : Any = []
_lowercase : Tuple = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(UpperCAmelCase_ ):
_lowercase : str = []
for i_focus in range(len(UpperCAmelCase_ ) ):
_lowercase : List[str] = (
np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(UpperCAmelCase_ ) )
_lowercase : List[str] = np.asmatrix(UpperCAmelCase_ ).reshape(
UpperCAmelCase_ ,UpperCAmelCase_ )
data_featuremap.append(UpperCAmelCase_ )
        # expand the data slice to one dimension
_lowercase : Tuple = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(UpperCAmelCase_ ) )
_lowercase : Optional[int] = np.asarray(UpperCAmelCase_ )
return focus_list, data_featuremap
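        # Worked size example (values assumed): a 28x28 input convolved with a 5x5
        # kernel at stride 1 yields feature maps of (28 - 5) / 1 + 1 = 24 per side.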
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_="average_pool" ):
# pooling process
_lowercase : Any = len(featuremaps[0] )
_lowercase : Optional[int] = int(size_map / size_pooling )
_lowercase : List[str] = []
for i_map in range(len(UpperCAmelCase_ ) ):
_lowercase : Union[str, Any] = featuremaps[i_map]
_lowercase : List[str] = []
for i_focus in range(0 ,UpperCAmelCase_ ,UpperCAmelCase_ ):
for j_focus in range(0 ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : List[Any] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(UpperCAmelCase_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(UpperCAmelCase_ ) )
_lowercase : int = np.asmatrix(UpperCAmelCase_ ).reshape(UpperCAmelCase_ ,UpperCAmelCase_ )
featuremap_pooled.append(UpperCAmelCase_ )
return featuremap_pooled
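        # Worked example (patch values assumed): average-pooling the 2x2 patch
        # [[1, 2], [3, 4]] yields (1 + 2 + 3 + 4) / 4 = 2.5, while max pooling keeps 4.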
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
        # expand three-dimensional data into a one-dimensional list
_lowercase : List[Any] = []
for i in range(len(UpperCAmelCase_ ) ):
_lowercase : List[Any] = np.shape(data[i] )
_lowercase : Dict = data[i].reshape(1 ,shapes[0] * shapes[1] )
_lowercase : Any = data_listed.getA().tolist()[0]
data_expanded.extend(UpperCAmelCase_ )
_lowercase : Optional[int] = np.asarray(UpperCAmelCase_ )
return data_expanded
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
        # expand a matrix into a one-dimensional list
_lowercase : Optional[int] = np.asarray(UpperCAmelCase_ )
_lowercase : List[str] = np.shape(UpperCAmelCase_ )
_lowercase : List[Any] = data_mat.reshape(1 ,shapes[0] * shapes[1] )
return data_expanded
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Union[str, Any] = []
_lowercase : Optional[Any] = 0
for i_map in range(UpperCAmelCase_ ):
_lowercase : List[Any] = np.ones((size_map, size_map) )
for i in range(0 ,UpperCAmelCase_ ,UpperCAmelCase_ ):
for j in range(0 ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Optional[Any] = pd_pool[
i_pool
]
_lowercase : Dict = i_pool + 1
_lowercase : Tuple = np.multiply(
UpperCAmelCase_ ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
pd_all.append(UpperCAmelCase_ )
return pd_all
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_=bool ):
        # model training
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(UpperCAmelCase_ )) )
print((""" - - Shape: Teach_Data """, np.shape(UpperCAmelCase_ )) )
_lowercase : Union[str, Any] = 0
_lowercase : List[str] = []
_lowercase : int = 1_00_00
while rp < n_repeat and mse >= error_accuracy:
_lowercase : int = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(UpperCAmelCase_ ) ):
# print('------------Learning Image: %d--------------'%p)
_lowercase : str = np.asmatrix(datas_train[p] )
_lowercase : str = np.asarray(datas_teach[p] )
_lowercase : Tuple = self.convolute(
UpperCAmelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
_lowercase : Tuple = self.pooling(UpperCAmelCase_ ,self.size_poolinga )
_lowercase : List[Any] = np.shape(UpperCAmelCase_ )
_lowercase : str = self._expand(UpperCAmelCase_ )
_lowercase : int = data_bp_input
_lowercase : List[Any] = np.dot(UpperCAmelCase_ ,self.vji.T ) - self.thre_bpa
_lowercase : List[Any] = self.sig(UpperCAmelCase_ )
_lowercase : Optional[Any] = np.dot(UpperCAmelCase_ ,self.wkj.T ) - self.thre_bpa
_lowercase : Union[str, Any] = self.sig(UpperCAmelCase_ )
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
_lowercase : Any = np.multiply(
(data_teach - bp_outa) ,np.multiply(UpperCAmelCase_ ,(1 - bp_outa) ) )
_lowercase : Optional[Any] = np.multiply(
np.dot(UpperCAmelCase_ ,self.wkj ) ,np.multiply(UpperCAmelCase_ ,(1 - bp_outa) ) )
_lowercase : Tuple = np.dot(UpperCAmelCase_ ,self.vji )
_lowercase : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga)
_lowercase : List[str] = pd_conva_pooled.T.getA().tolist()
_lowercase : Dict = self._calculate_gradient_from_pool(
UpperCAmelCase_ ,UpperCAmelCase_ ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
_lowercase : Optional[int] = self._expand_mat(pd_conva_all[k_conv] )
_lowercase : Optional[Any] = self.rate_weight * np.dot(UpperCAmelCase_ ,UpperCAmelCase_ )
_lowercase : Tuple = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
_lowercase : Optional[Any] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
_lowercase : Any = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
_lowercase : Tuple = self.vji + pd_j_all.T * bp_outa * self.rate_weight
_lowercase : Union[str, Any] = self.thre_bpa - pd_k_all * self.rate_thre
_lowercase : List[Any] = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the summed error of this single image and accumulate it
_lowercase : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
_lowercase : Any = rp + 1
_lowercase : Optional[int] = error_count / patterns
all_mse.append(UpperCAmelCase_ )
def draw_error():
_lowercase : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(UpperCAmelCase_ ,"""+-""" )
plt.plot(UpperCAmelCase_ ,"""r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(UpperCAmelCase_ ,alpha=0.5 )
plt.show()
        print("""------------------Training Complete---------------------""" )
print((""" - - Training epoch: """, rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
# model predict
_lowercase : Optional[int] = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(UpperCAmelCase_ )) )
for p in range(len(UpperCAmelCase_ ) ):
_lowercase : Tuple = np.asmatrix(datas_test[p] )
_lowercase : Optional[Any] = self.convolute(
UpperCAmelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
_lowercase : List[str] = self.pooling(UpperCAmelCase_ ,self.size_poolinga )
_lowercase : Dict = self._expand(UpperCAmelCase_ )
_lowercase : Optional[int] = data_bp_input
_lowercase : Tuple = bp_outa * self.vji.T - self.thre_bpa
_lowercase : Union[str, Any] = self.sig(UpperCAmelCase_ )
_lowercase : Union[str, Any] = bp_outa * self.wkj.T - self.thre_bpa
_lowercase : Dict = self.sig(UpperCAmelCase_ )
produce_out.extend(bp_outa.getA().tolist() )
_lowercase : Any = [list(map(self.do_round ,UpperCAmelCase_ ) ) for each in produce_out]
return np.asarray(UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
        # return the image data after the convolution process so we can inspect it
_lowercase : Dict = np.asmatrix(UpperCAmelCase_ )
_lowercase : Tuple = self.convolute(
UpperCAmelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
_lowercase : Union[str, Any] = self.pooling(UpperCAmelCase_ ,self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 351 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
    __UpperCAmelCase = re.sub("""<n>""" , """""" , __UpperCAmelCase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__UpperCAmelCase ) )
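# Hedged usage sketch (example text assumed): with the punkt models downloaded above,
# nltk.sent_tokenize splits "Hello there. General Kenobi." into two sentences, which
# the function returns joined by a newline.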
| 336 | 0 |
"""simple docstring"""
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def tpu_command_parser(subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser("""tpu-config""" , description=_description )
    else:
        parser = argparse.ArgumentParser("""Accelerate tpu-config command""" , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
"""Config Arguments""" , """Arguments that can be configured through `accelerate config`.""" )
config_args.add_argument(
"""--config_file""" , type=__UpperCAmelCase , default=__UpperCAmelCase , help="""Path to the config file to use for accelerate.""" , )
config_args.add_argument(
"""--tpu_name""" , default=__UpperCAmelCase , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , )
config_args.add_argument(
"""--tpu_zone""" , default=__UpperCAmelCase , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , )
_lowercase : int = parser.add_argument_group("""TPU Arguments""" , """Arguments for options ran inside the TPU.""" )
pod_args.add_argument(
"""--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , )
pod_args.add_argument(
"""--command_file""" , default=__UpperCAmelCase , help="""The path to the file containing the commands to run on the pod on startup.""" , )
pod_args.add_argument(
"""--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , )
pod_args.add_argument(
"""--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , )
pod_args.add_argument(
"""--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.""" , )
pod_args.add_argument(
"""--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" )
if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
return parser
def tpu_command_launcher(args ):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = """git+https://github.com/huggingface/accelerate.git"""
    elif args.accelerate_version == "latest":
        args.accelerate_version = """accelerate -U"""
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = F"""accelerate=={args.accelerate_version}"""
    if not args.command_file and not args.command:
        raise ValueError("""You must specify either a command file or a command to run on the pod.""" )
    if args.command_file:
        with open(args.command_file , """r""" ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["""cd /usr/share"""]
    if args.install_accelerate:
        new_cmd += [F"""pip install {args.accelerate_version}"""]
    new_cmd += args.command
    args.command = """; """.join(new_cmd )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["""gcloud"""]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(F"""Running {" ".join(cmd )}""" )
        return
    subprocess.run(cmd )
    print("""Successfully setup pod.""" )
print("""Successfully setup pod.""" )
def __SCREAMING_SNAKE_CASE ( ):
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )
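# Hedged invocation sketch (values assumed; the flags are defined by the parser above):
# accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#     --command "echo hello" --debug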
| 352 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps , max_beta=0.9_9_9 , alpha_transform_type="cosine" , ):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -1_2.0 )
    else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
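# Hedged note on the schedule above (values illustrative): with the cosine transform,
# alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2 decays smoothly from ~1 at t = 0
# toward 0 at t = 1, and each beta_i = 1 - alpha_bar(t2) / alpha_bar(t1) is clipped at max_beta.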
class UpperCamelCase ( SchedulerMixin , ConfigMixin ):
    """simple docstring"""
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
def __init__( self ,UpperCAmelCase_ = 10_00 ,UpperCAmelCase_ = 0.00085 ,UpperCAmelCase_ = 0.012 ,UpperCAmelCase_ = "linear" ,UpperCAmelCase_ = None ,UpperCAmelCase_ = "epsilon" ,UpperCAmelCase_ = "linspace" ,UpperCAmelCase_ = 0 ,):
if trained_betas is not None:
_lowercase : str = torch.tensor(UpperCAmelCase_ ,dtype=torch.floataa )
elif beta_schedule == "linear":
_lowercase : Optional[Any] = torch.linspace(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowercase : Any = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,UpperCAmelCase_ ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowercase : Any = betas_for_alpha_bar(UpperCAmelCase_ )
else:
            raise NotImplementedError(f"""{beta_schedule} is not implemented for {self.__class__}""" )
_lowercase : Tuple = 1.0 - self.betas
_lowercase : Dict = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_=None ):
if schedule_timesteps is None:
_lowercase : Optional[int] = self.timesteps
_lowercase : Union[str, Any] = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_lowercase : Optional[Any] = 1 if len(UpperCAmelCase_ ) > 1 else 0
else:
_lowercase : Dict = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_ ) else timestep
_lowercase : List[str] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCamelCase__ ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
_lowercase : str = self.index_for_timestep(UpperCAmelCase_ )
if self.state_in_first_order:
_lowercase : Optional[Any] = self.sigmas[step_index]
else:
_lowercase : Dict = self.sigmas_interpol[step_index]
_lowercase : Optional[int] = sample / ((sigma**2 + 1) ** 0.5)
return sample
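        # Worked note (illustrative): dividing by (sigma**2 + 1) ** 0.5 keeps the model
        # input near unit variance; for sigma = 1 the sample is scaled by 1 / sqrt(2).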
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,):
_lowercase : List[str] = num_inference_steps
_lowercase : Dict = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_lowercase : Dict = np.linspace(0 ,num_train_timesteps - 1 ,UpperCAmelCase_ ,dtype=UpperCAmelCase_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_lowercase : Union[str, Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowercase : str = (np.arange(0 ,UpperCAmelCase_ ) * step_ratio).round()[::-1].copy().astype(UpperCAmelCase_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_lowercase : str = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowercase : Optional[int] = (np.arange(UpperCAmelCase_ ,0 ,-step_ratio )).round().copy().astype(UpperCAmelCase_ )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
_lowercase : Union[str, Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_lowercase : Optional[Any] = torch.from_numpy(np.log(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
_lowercase : List[str] = np.interp(UpperCAmelCase_ ,np.arange(0 ,len(UpperCAmelCase_ ) ) ,UpperCAmelCase_ )
_lowercase : int = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_lowercase : Any = torch.from_numpy(UpperCAmelCase_ ).to(device=UpperCAmelCase_ )
# interpolate sigmas
_lowercase : List[str] = sigmas.log().lerp(sigmas.roll(1 ).log() ,0.5 ).exp()
_lowercase : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_lowercase : Tuple = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(UpperCAmelCase_ ).startswith("""mps""" ):
# mps does not support float64
_lowercase : Tuple = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ ,dtype=torch.floataa )
else:
_lowercase : str = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ )
# interpolate timesteps
_lowercase : int = self.sigma_to_t(UpperCAmelCase_ ).to(UpperCAmelCase_ ,dtype=timesteps.dtype )
_lowercase : Union[str, Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) ,dim=-1 ).flatten()
_lowercase : str = torch.cat([timesteps[:1], interleaved_timesteps] )
_lowercase : List[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_lowercase : Optional[Any] = defaultdict(UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
# get log sigma
_lowercase : Optional[Any] = sigma.log()
# get distribution
_lowercase : Optional[int] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_lowercase : Tuple = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_lowercase : List[Any] = low_idx + 1
_lowercase : int = self.log_sigmas[low_idx]
_lowercase : Any = self.log_sigmas[high_idx]
# interpolate sigmas
_lowercase : Any = (low - log_sigma) / (low - high)
_lowercase : Dict = w.clamp(0 ,1 )
# transform interpolation to time range
_lowercase : List[str] = (1 - w) * low_idx + w * high_idx
_lowercase : Optional[int] = t.view(sigma.shape )
return t
@property
def lowerCamelCase__ ( self ):
return self.sample is None
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = True ,):
_lowercase : Optional[int] = self.index_for_timestep(UpperCAmelCase_ )
# advance index counter by 1
_lowercase : str = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_lowercase : Any = self.sigmas[step_index]
_lowercase : Any = self.sigmas_interpol[step_index + 1]
_lowercase : Tuple = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_lowercase : Union[str, Any] = self.sigmas[step_index - 1]
_lowercase : int = self.sigmas_interpol[step_index]
_lowercase : Tuple = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyway.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_lowercase : Any = 0
_lowercase : int = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_lowercase : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_interpol
_lowercase : Optional[Any] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_lowercase : str = sigma_hat if self.state_in_first_order else sigma_interpol
_lowercase : List[str] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_lowercase : List[str] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_lowercase : Any = sigma_interpol - sigma_hat
# store for 2nd order step
_lowercase : List[Any] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_lowercase : Optional[Any] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_lowercase : Optional[Any] = sigma_next - sigma_hat
_lowercase : Any = self.sample
_lowercase : Optional[int] = None
_lowercase : str = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_lowercase : int = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(UpperCAmelCase_ ):
# mps does not support float64
_lowercase : str = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
_lowercase : Any = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
_lowercase : List[Any] = self.timesteps.to(original_samples.device )
_lowercase : Union[str, Any] = timesteps.to(original_samples.device )
_lowercase : List[Any] = [self.index_for_timestep(UpperCAmelCase_ ,UpperCAmelCase_ ) for t in timesteps]
_lowercase : Optional[Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_lowercase : List[Any] = sigma.unsqueeze(-1 )
_lowercase : int = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
| 336 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ):
_lowercase : Optional[int] = tempfile.mkdtemp()
_lowercase : List[str] = BlipImageProcessor()
_lowercase : int = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
_lowercase : Optional[int] = BlipProcessor(UpperCAmelCase_ ,UpperCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ).tokenizer
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ ).image_processor
def lowerCamelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self ):
_lowercase : int = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]
_lowercase : Dict = [Image.fromarray(np.moveaxis(UpperCAmelCase_ ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = BlipProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowercase : str = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
_lowercase : Optional[int] = self.get_image_processor(do_normalize=UpperCAmelCase_ ,padding_value=1.0 )
_lowercase : Optional[int] = BlipProcessor.from_pretrained(
self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=UpperCAmelCase_ ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,UpperCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : Optional[int] = self.get_image_processor()
_lowercase : Any = self.get_tokenizer()
_lowercase : Any = BlipProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : List[Any] = self.prepare_image_inputs()
_lowercase : Any = image_processor(UpperCAmelCase_ ,return_tensors="""np""" )
_lowercase : Optional[int] = processor(images=UpperCAmelCase_ ,return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.get_image_processor()
_lowercase : List[str] = self.get_tokenizer()
_lowercase : str = BlipProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : Any = """lower newer"""
_lowercase : Tuple = processor(text=UpperCAmelCase_ )
_lowercase : Tuple = tokenizer(UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def lowerCamelCase__ ( self ):
_lowercase : int = self.get_image_processor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : Tuple = BlipProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : Union[str, Any] = """lower newer"""
_lowercase : Dict = self.prepare_image_inputs()
_lowercase : Any = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def lowerCamelCase__ ( self ):
_lowercase : str = self.get_image_processor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : Optional[Any] = BlipProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowercase : int = processor.batch_decode(UpperCAmelCase_ )
_lowercase : Dict = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
_lowercase : List[str] = self.get_image_processor()
_lowercase : Tuple = self.get_tokenizer()
_lowercase : Any = BlipProcessor(tokenizer=UpperCAmelCase_ ,image_processor=UpperCAmelCase_ )
_lowercase : int = """lower newer"""
_lowercase : Optional[int] = self.prepare_image_inputs()
_lowercase : Tuple = processor(text=UpperCAmelCase_ ,images=UpperCAmelCase_ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] )
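    # Hedged invocation sketch (file path assumed): these tests can be run standalone
    # with: pytest -q test_processor_blip.py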
| 353 |
"""simple docstring"""
import pprint
import requests
API_ENDPOINT_URL = """https://zenquotes.io/api"""
def quote_of_the_day():
    return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def random_quotes():
    return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
| 336 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase: Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase: Union[str, Any] = {
"""sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class UpperCamelCase ( PretrainedConfig ):
    """simple docstring"""
    model_type = "vit_msn"
def __init__( self ,UpperCAmelCase_=7_68 ,UpperCAmelCase_=12 ,UpperCAmelCase_=12 ,UpperCAmelCase_=30_72 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-06 ,UpperCAmelCase_=2_24 ,UpperCAmelCase_=16 ,UpperCAmelCase_=3 ,UpperCAmelCase_=True ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ )
_lowercase : Union[str, Any] = hidden_size
_lowercase : Dict = num_hidden_layers
_lowercase : int = num_attention_heads
_lowercase : str = intermediate_size
_lowercase : Tuple = hidden_act
_lowercase : List[Any] = hidden_dropout_prob
_lowercase : str = attention_probs_dropout_prob
_lowercase : List[str] = initializer_range
_lowercase : List[Any] = layer_norm_eps
_lowercase : List[Any] = image_size
_lowercase : str = patch_size
_lowercase : List[Any] = num_channels
_lowercase : Any = qkv_bias
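    # Hedged usage sketch (keyword values assumed): overriding the image and patch
    # sizes, e.g. config = UpperCamelCase(image_size=384, patch_size=16), implies
    # (384 // 16) ** 2 = 576 patch tokens in a standard ViT patch embedding.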
| 354 |
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict ( TypedDict ):
    """simple docstring"""
    bwt_string: str
    idx_original_string: int
def all_rotations(s ):
    if not isinstance(s , str ):
        raise TypeError("""The parameter s type must be str.""" )
    return [s[i:] + s[:i] for i in range(len(s ) )]
def bwt_transform(s ):
    if not isinstance(s , str ):
        raise TypeError("""The parameter s type must be str.""" )
    if not s:
        raise ValueError("""The parameter s must not be empty.""" )
    rotations = all_rotations(s )
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response : BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(s ),
    }
    return response
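# Worked example (input string assumed): for the classic "banana" case the transform
# yields "nnbaaa" with the original rotation at sorted index 3.
assert bwt_transform("""banana""" ) == {"""bwt_string""": """nnbaaa""", """idx_original_string""": 3}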
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError("""The parameter bwt_string type must be str.""" )
if not bwt_string:
raise ValueError("""The parameter bwt_string must not be empty.""" )
try:
_lowercase : Optional[Any] = int(__UpperCAmelCase )
except ValueError:
raise TypeError(
"""The parameter idx_original_string type must be int or passive"""
""" of cast to int.""" )
if idx_original_string < 0:
raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
if idx_original_string >= len(__UpperCAmelCase ):
raise ValueError(
"""The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )
_lowercase : int = [""""""] * len(__UpperCAmelCase )
for _ in range(len(__UpperCAmelCase ) ):
for i in range(len(__UpperCAmelCase ) ):
_lowercase : Union[str, Any] = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
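# Inverse check for the example above (comments only): rebuilding the sorted
# rotation table column by column from "nnbaaa" and reading row 3 recovers
# "banana", matching the bwt_transform/reverse_bwt round trip exercised in
# the __main__ block below.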
if __name__ == "__main__":
UpperCAmelCase: Optional[int] = """Provide a string that I will generate its BWT transform: """
UpperCAmelCase: int = input(entry_msg).strip()
UpperCAmelCase: List[str] = bwt_transform(s)
print(
F'Burrows Wheeler transform for string \'{s}\' results '
F'in \'{result["bwt_string"]}\''
)
UpperCAmelCase: Union[str, Any] = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
F'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
F'we get original string \'{original_string}\''
)
| 336 | 0 |
"""simple docstring"""
import pprint
import requests
UpperCAmelCase: Tuple = """https://zenquotes.io/api"""
def __SCREAMING_SNAKE_CASE ( ):
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def __SCREAMING_SNAKE_CASE ( ):
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
UpperCAmelCase: int = random_quotes()
pprint.pprint(response)
| 355 |
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def __SCREAMING_SNAKE_CASE ( ):
_lowercase : Dict = [randint(-1000 , 1000 ) for i in range(10 )]
_lowercase : Tuple = randint(-5000 , 5000 )
return (arr, r)
UpperCAmelCase: int = make_dataset()
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
for triplet in permutations(__UpperCAmelCase , 3 ):
if sum(__UpperCAmelCase ) == target:
return tuple(sorted(__UpperCAmelCase ) )
return (0, 0, 0)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
arr.sort()
_lowercase : Optional[Any] = len(__UpperCAmelCase )
for i in range(n - 1 ):
_lowercase , _lowercase : str = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
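# A short trace of the two-pointer scan (comments only): for the sorted array
# [-2, 1, 3, 4, 7] and target 8, fixing arr[0] = -2 starts left/right at 1
# and 7 (sum 6 < 8), so left advances to 3 and -2 + 3 + 7 == 8 yields
# (-2, 3, 7).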
def __SCREAMING_SNAKE_CASE ( ):
_lowercase : Tuple = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
_lowercase : Union[str, Any] = """
triplet_sum1(*dataset)
"""
_lowercase : Union[str, Any] = """
triplet_sum2(*dataset)
"""
_lowercase : Dict = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 )
_lowercase : Any = repeat(setup=__UpperCAmelCase , stmt=__UpperCAmelCase , repeat=5 , number=10000 )
return (min(__UpperCAmelCase ), min(__UpperCAmelCase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase: Any = solution_times()
print(F'The time for naive implementation is {times[0]}.')
print(F'The time for optimized implementation is {times[1]}.')
| 336 | 0 |
"""simple docstring"""
import argparse
from collections import defaultdict
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : str = F"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : Any = f.readlines()
_lowercase : Optional[int] = F"""class {class_name}("""
_lowercase : List[str] = F"""{4 * " "}def {test_name}("""
_lowercase : List[Any] = F"""{8 * " "}{correct_line.split()[0]}"""
_lowercase : int = F"""{16 * " "}{correct_line.split()[0]}"""
_lowercase : str = False
_lowercase : Optional[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Any = False
_lowercase : int = 0
_lowercase : Tuple = 0
_lowercase : Union[str, Any] = []
for line in lines:
if line.startswith(__UpperCAmelCase ):
_lowercase : List[str] = True
elif in_class and line.startswith(__UpperCAmelCase ):
_lowercase : str = True
elif in_class and in_func and (line.startswith(__UpperCAmelCase ) or line.startswith(__UpperCAmelCase )):
_lowercase : Union[str, Any] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowercase : Optional[int] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowercase : Optional[Any] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"""{spaces * " "}{correct_line}""" )
_lowercase : Union[str, Any] = False
else:
new_lines.append(__UpperCAmelCase )
with open(__UpperCAmelCase , """w""" ) as f:
for line in new_lines:
f.write(__UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=None ):
if fail is not None:
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : Dict = {l.strip() for l in f.readlines()}
else:
_lowercase : int = None
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : int = f.readlines()
_lowercase : int = defaultdict(__UpperCAmelCase )
for line in correct_lines:
        _lowercase , _lowercase , _lowercase , _lowercase : int = line.split(""";""" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
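# Input format inferred from the split above (an illustrative sketch; the
# path, class, test and line are made up, not taken from the original file):
# tests/test_modeling_x.py;XModelTest;test_forward;self.assertEqual(out.shape, (1, 8))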
if __name__ == "__main__":
UpperCAmelCase: List[Any] = argparse.ArgumentParser()
parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""")
parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None)
UpperCAmelCase: Any = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 356 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ["image_processor", "tokenizer"]
SCREAMING_SNAKE_CASE_ : Optional[int] = "BlipImageProcessor"
SCREAMING_SNAKE_CASE_ : Tuple = "AutoTokenizer"
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
super().__init__(UpperCAmelCase_ ,UpperCAmelCase_ )
# add QFormer tokenizer
_lowercase : Optional[int] = qformer_tokenizer
def __call__( self ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = True ,UpperCAmelCase_ = False ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = 0 ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
if images is None and text is None:
raise ValueError("""You have to specify at least images or text.""" )
_lowercase : List[Any] = BatchFeature()
if text is not None:
_lowercase : List[str] = self.tokenizer(
text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
encoding.update(UpperCAmelCase_ )
_lowercase : Dict = self.qformer_tokenizer(
text=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,stride=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,return_overflowing_tokens=UpperCAmelCase_ ,return_special_tokens_mask=UpperCAmelCase_ ,return_offsets_mapping=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ ,return_length=UpperCAmelCase_ ,verbose=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
_lowercase : str = qformer_text_encoding.pop("""input_ids""" )
_lowercase : int = qformer_text_encoding.pop("""attention_mask""" )
if images is not None:
_lowercase : Optional[Any] = self.image_processor(UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ )
encoding.update(UpperCAmelCase_ )
return encoding
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return self.tokenizer.batch_decode(*UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
return self.tokenizer.decode(*UpperCAmelCase_ ,**UpperCAmelCase_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = self.tokenizer.model_input_names
_lowercase : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
if os.path.isfile(UpperCAmelCase_ ):
raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(UpperCAmelCase_ ,exist_ok=UpperCAmelCase_ )
_lowercase : Any = os.path.join(UpperCAmelCase_ ,"""qformer_tokenizer""" )
self.qformer_tokenizer.save_pretrained(UpperCAmelCase_ )
return super().save_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
_lowercase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCAmelCase_ ,subfolder="""qformer_tokenizer""" )
_lowercase : Any = cls._get_arguments_from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
args.append(UpperCAmelCase_ )
return cls(*UpperCAmelCase_ )
| 336 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
UpperCAmelCase = ["""small""", """medium""", """large"""]
UpperCAmelCase = """lm_head.decoder.weight"""
UpperCAmelCase = """lm_head.weight"""
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : str = torch.load(__UpperCAmelCase )
_lowercase : Optional[Any] = d.pop(__UpperCAmelCase )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
torch.save(__UpperCAmelCase , os.path.join(__UpperCAmelCase , __UpperCAmelCase ) )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
UpperCAmelCase = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
UpperCAmelCase = os.path.join(args.dialogpt_path, F'{MODEL}_ft.pkl')
UpperCAmelCase = F'./DialoGPT-{MODEL}'
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 357 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase: Tuple = logging.get_logger(__name__)
UpperCAmelCase: List[Any] = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = "trajectory_transformer"
SCREAMING_SNAKE_CASE_ : Optional[int] = ["past_key_values"]
SCREAMING_SNAKE_CASE_ : Tuple = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self ,UpperCAmelCase_=1_00 ,UpperCAmelCase_=5 ,UpperCAmelCase_=1 ,UpperCAmelCase_=1 ,UpperCAmelCase_=2_49 ,UpperCAmelCase_=6 ,UpperCAmelCase_=17 ,UpperCAmelCase_=25 ,UpperCAmelCase_=4 ,UpperCAmelCase_=4 ,UpperCAmelCase_=1_28 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.0006 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=1 ,UpperCAmelCase_=True ,UpperCAmelCase_=1 ,UpperCAmelCase_=5_02_56 ,UpperCAmelCase_=5_02_56 ,**UpperCAmelCase_ ,):
_lowercase : Dict = vocab_size
_lowercase : List[str] = action_weight
_lowercase : int = reward_weight
_lowercase : List[Any] = value_weight
_lowercase : List[str] = max_position_embeddings
_lowercase : Any = block_size
_lowercase : Any = action_dim
_lowercase : List[str] = observation_dim
_lowercase : Union[str, Any] = transition_dim
_lowercase : str = learning_rate
_lowercase : Tuple = n_layer
_lowercase : Optional[int] = n_head
_lowercase : List[str] = n_embd
_lowercase : List[str] = embd_pdrop
_lowercase : Optional[Any] = attn_pdrop
_lowercase : List[Any] = resid_pdrop
_lowercase : str = initializer_range
_lowercase : Optional[Any] = layer_norm_eps
_lowercase : List[Any] = kaiming_initializer_range
_lowercase : List[Any] = use_cache
super().__init__(pad_token_id=UpperCAmelCase_ ,bos_token_id=UpperCAmelCase_ ,eos_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ )
| 336 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase: Any = {
"""configuration_encodec""": [
"""ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EncodecConfig""",
],
"""feature_extraction_encodec""": ["""EncodecFeatureExtractor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: int = [
"""ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EncodecModel""",
"""EncodecPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
UpperCAmelCase: Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 358 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase: Any = logging.get_logger(__name__)
UpperCAmelCase: List[str] = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = "instructblip_vision_model"
def __init__( self ,UpperCAmelCase_=14_08 ,UpperCAmelCase_=61_44 ,UpperCAmelCase_=39 ,UpperCAmelCase_=16 ,UpperCAmelCase_=2_24 ,UpperCAmelCase_=14 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=1E-6 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=1E-10 ,UpperCAmelCase_=True ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ )
_lowercase : Optional[Any] = hidden_size
_lowercase : Tuple = intermediate_size
_lowercase : List[Any] = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[Any] = patch_size
_lowercase : Optional[Any] = image_size
_lowercase : Union[str, Any] = initializer_range
_lowercase : Optional[Any] = attention_dropout
_lowercase : List[Any] = layer_norm_eps
_lowercase : Optional[int] = hidden_act
_lowercase : Tuple = qkv_bias
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
_lowercase , _lowercase : List[Any] = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
_lowercase : int = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "instructblip_qformer"
def __init__( self ,UpperCAmelCase_=3_05_22 ,UpperCAmelCase_=7_68 ,UpperCAmelCase_=12 ,UpperCAmelCase_=12 ,UpperCAmelCase_=30_72 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=0 ,UpperCAmelCase_="absolute" ,UpperCAmelCase_=2 ,UpperCAmelCase_=14_08 ,**UpperCAmelCase_ ,):
super().__init__(pad_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : List[Any] = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : List[str] = num_attention_heads
_lowercase : Optional[Any] = hidden_act
_lowercase : int = intermediate_size
_lowercase : Union[str, Any] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : List[Any] = max_position_embeddings
_lowercase : Tuple = initializer_range
_lowercase : Optional[int] = layer_norm_eps
_lowercase : Any = position_embedding_type
_lowercase : Dict = cross_attention_frequency
_lowercase : Optional[Any] = encoder_hidden_size
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
_lowercase , _lowercase : Dict = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
_lowercase : str = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = "instructblip"
SCREAMING_SNAKE_CASE_ : List[str] = True
def __init__( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=32 ,**UpperCAmelCase_ ):
super().__init__(**UpperCAmelCase_ )
if vision_config is None:
_lowercase : str = {}
logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" )
if qformer_config is None:
_lowercase : Any = {}
logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" )
if text_config is None:
_lowercase : Optional[int] = {}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
_lowercase : int = InstructBlipVisionConfig(**UpperCAmelCase_ )
_lowercase : Optional[int] = InstructBlipQFormerConfig(**UpperCAmelCase_ )
_lowercase : Dict = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
_lowercase : str = CONFIG_MAPPING[text_model_type](**UpperCAmelCase_ )
_lowercase : str = self.text_config.tie_word_embeddings
_lowercase : Union[str, Any] = self.text_config.is_encoder_decoder
_lowercase : List[str] = num_query_tokens
_lowercase : List[str] = self.vision_config.hidden_size
_lowercase : Dict = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowercase : Union[str, Any] = 1.0
_lowercase : Dict = 0.02
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ,):
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**UpperCAmelCase_ ,)
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = copy.deepcopy(self.__dict__ )
_lowercase : int = self.vision_config.to_dict()
_lowercase : Any = self.qformer_config.to_dict()
_lowercase : Any = self.text_config.to_dict()
_lowercase : Optional[int] = self.__class__.model_type
return output
| 336 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
return {key.lstrip("""-""" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
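# Illustration of the helper above (flag names are made up for the example):
# the leftover CLI list ["--num_proc", "4", "--cache_dir", "/tmp"] pairs up
# into {"num_proc": "4", "cache_dir": "/tmp"}.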
def __SCREAMING_SNAKE_CASE ( ):
_lowercase : Optional[Any] = ArgumentParser(
"""HuggingFace Datasets CLI tool""" , usage="""datasets-cli <command> [<args>]""" , allow_abbrev=__UpperCAmelCase )
_lowercase : Dict = parser.add_subparsers(help="""datasets-cli command helpers""" )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(__UpperCAmelCase )
EnvironmentCommand.register_subcommand(__UpperCAmelCase )
TestCommand.register_subcommand(__UpperCAmelCase )
RunBeamCommand.register_subcommand(__UpperCAmelCase )
DummyDataCommand.register_subcommand(__UpperCAmelCase )
# Parse args
    _lowercase , _lowercase : Optional[int] = parser.parse_known_args()
if not hasattr(__UpperCAmelCase , """func""" ):
parser.print_help()
exit(1 )
_lowercase : List[str] = parse_unknown_args(__UpperCAmelCase )
# Run
_lowercase : Optional[int] = args.func(__UpperCAmelCase , **__UpperCAmelCase )
service.run()
if __name__ == "__main__":
main()
| 359 |
"""simple docstring"""
import cva
import numpy as np
class UpperCamelCase :
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
if k in (0.04, 0.06):
_lowercase : Optional[Any] = k
_lowercase : Optional[Any] = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self ):
return str(self.k )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : List[str] = cva.imread(UpperCAmelCase_ ,0 )
_lowercase , _lowercase : Dict = img.shape
_lowercase : list[list[int]] = []
_lowercase : int = img.copy()
_lowercase : List[str] = cva.cvtColor(UpperCAmelCase_ ,cva.COLOR_GRAY2RGB )
_lowercase , _lowercase : Optional[Any] = np.gradient(UpperCAmelCase_ )
_lowercase : Optional[int] = dx**2
_lowercase : Optional[Any] = dy**2
_lowercase : Optional[Any] = dx * dy
_lowercase : List[str] = 0.04
_lowercase : Optional[Any] = self.window_size // 2
for y in range(UpperCAmelCase_ ,h - offset ):
for x in range(UpperCAmelCase_ ,w - offset ):
_lowercase : Optional[Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : Dict = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : Union[str, Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : int = (wxx * wyy) - (wxy**2)
_lowercase : Union[str, Any] = wxx + wyy
_lowercase : Union[str, Any] = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) ,0 )
color_img.itemset((y, x, 1) ,0 )
color_img.itemset((y, x, 2) ,2_55 )
return color_img, corner_list
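# The response r computed in the loop above is the standard Harris measure
# R = det(M) - k * trace(M)**2, where M = [[Wxx, Wxy], [Wxy, Wyy]] is the
# windowed structure tensor; a large positive R marks a corner, a negative R
# an edge, and a small |R| a flat patch (a standard reading, not stated in
# the original source).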
if __name__ == "__main__":
UpperCAmelCase: Optional[int] = HarrisCorner(0.04, 3)
UpperCAmelCase , UpperCAmelCase: List[Any] = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
| 336 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase: Any = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : Any = b.T
_lowercase : Union[str, Any] = np.sum(np.square(__UpperCAmelCase ) , axis=1 )
_lowercase : int = np.sum(np.square(__UpperCAmelCase ) , axis=0 )
_lowercase : Union[str, Any] = np.matmul(__UpperCAmelCase , __UpperCAmelCase )
_lowercase : int = aa[:, None] - 2 * ab + ba[None, :]
return d
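# The function above uses the expansion ||a_i - b_j||**2 =
# ||a_i||**2 - 2 * a_i . b_j + ||b_j||**2 instead of forming explicit
# differences. Quick check (comments only): a = [[0, 0, 0]], b = [[1, 2, 2]]
# gives 0 - 0 + 9 = 9, the squared distance between the two points.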
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : Optional[Any] = x.reshape(-1 , 3 )
_lowercase : List[str] = squared_euclidean_distance(__UpperCAmelCase , __UpperCAmelCase )
return np.argmin(__UpperCAmelCase , axis=1 )
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ["pixel_values"]
def __init__( self ,UpperCAmelCase_ = None ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = PILImageResampling.BILINEAR ,UpperCAmelCase_ = True ,UpperCAmelCase_ = True ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ )
_lowercase : Dict = size if size is not None else {"""height""": 2_56, """width""": 2_56}
_lowercase : Tuple = get_size_dict(UpperCAmelCase_ )
_lowercase : Optional[Any] = np.array(UpperCAmelCase_ ) if clusters is not None else None
_lowercase : Any = do_resize
_lowercase : Tuple = size
_lowercase : Dict = resample
_lowercase : Dict = do_normalize
_lowercase : Union[str, Any] = do_color_quantize
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = PILImageResampling.BILINEAR ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
_lowercase : List[str] = get_size_dict(UpperCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dictionary must contain both height and width keys. Got {size.keys()}""" )
return resize(
UpperCAmelCase_ ,size=(size["""height"""], size["""width"""]) ,resample=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,):
_lowercase : Tuple = rescale(image=UpperCAmelCase_ ,scale=1 / 127.5 ,data_format=UpperCAmelCase_ )
_lowercase : Dict = image - 1
return image
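    # Net effect of the two steps above: image / 127.5 - 1 maps pixel value 0
    # to -1.0 and 255 to 1.0, so uint8 inputs land in the [-1, 1] range used
    # later by the color-cluster quantization step.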
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = ChannelDimension.FIRST ,**UpperCAmelCase_ ,):
_lowercase : Dict = do_resize if do_resize is not None else self.do_resize
_lowercase : Union[str, Any] = size if size is not None else self.size
_lowercase : str = get_size_dict(UpperCAmelCase_ )
_lowercase : int = resample if resample is not None else self.resample
_lowercase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
_lowercase : Any = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
_lowercase : Any = clusters if clusters is not None else self.clusters
_lowercase : Any = np.array(UpperCAmelCase_ )
_lowercase : Optional[int] = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_color_quantize and clusters is None:
raise ValueError("""Clusters must be specified if do_color_quantize is True.""" )
# All transformations expect numpy arrays.
_lowercase : Optional[Any] = [to_numpy_array(UpperCAmelCase_ ) for image in images]
if do_resize:
_lowercase : int = [self.resize(image=UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ) for image in images]
if do_normalize:
_lowercase : Dict = [self.normalize(image=UpperCAmelCase_ ) for image in images]
if do_color_quantize:
_lowercase : List[Any] = [to_channel_dimension_format(UpperCAmelCase_ ,ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
_lowercase : int = np.array(UpperCAmelCase_ )
_lowercase : Optional[int] = color_quantize(UpperCAmelCase_ ,UpperCAmelCase_ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
_lowercase : Dict = images.shape[0]
_lowercase : Optional[int] = images.reshape(UpperCAmelCase_ ,-1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
_lowercase : Tuple = list(UpperCAmelCase_ )
else:
_lowercase : List[str] = [to_channel_dimension_format(UpperCAmelCase_ ,UpperCAmelCase_ ) for image in images]
_lowercase : Tuple = {"""input_ids""": images}
return BatchFeature(data=UpperCAmelCase_ ,tensor_type=UpperCAmelCase_ )
| 360 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase ( snake_case , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = LEDTokenizer
SCREAMING_SNAKE_CASE_ : List[str] = LEDTokenizerFast
SCREAMING_SNAKE_CASE_ : List[str] = True
def lowerCamelCase__ ( self ):
super().setUp()
_lowercase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_lowercase : List[Any] = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) )
_lowercase : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowercase : Dict = {"""unk_token""": """<unk>"""}
_lowercase : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
_lowercase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,**UpperCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return "lower newer", "lower newer"
@cached_property
def lowerCamelCase__ ( self ):
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def lowerCamelCase__ ( self ):
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_lowercase : Any = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Tuple = tokenizer(UpperCAmelCase_ ,max_length=len(UpperCAmelCase_ ) ,padding=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
_lowercase : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Dict = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIn("""input_ids""" ,UpperCAmelCase_ )
self.assertIn("""attention_mask""" ,UpperCAmelCase_ )
self.assertNotIn("""labels""" ,UpperCAmelCase_ )
self.assertNotIn("""decoder_attention_mask""" ,UpperCAmelCase_ )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : Dict = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Tuple = tokenizer(text_target=UpperCAmelCase_ ,max_length=32 ,padding="""max_length""" ,return_tensors="""pt""" )
self.assertEqual(32 ,targets["""input_ids"""].shape[1] )
@require_torch
def lowerCamelCase__ ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : List[Any] = tokenizer(
["""I am a small frog""" * 10_24, """I am a small frog"""] ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,return_tensors="""pt""" )
self.assertIsInstance(UpperCAmelCase_ ,UpperCAmelCase_ )
self.assertEqual(batch.input_ids.shape ,(2, 51_22) )
@require_torch
def lowerCamelCase__ ( self ):
_lowercase : List[Any] = ["""A long paragraph for summarization."""]
_lowercase : Dict = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : Dict = tokenizer(UpperCAmelCase_ ,return_tensors="""pt""" )
_lowercase : List[str] = tokenizer(text_target=UpperCAmelCase_ ,return_tensors="""pt""" )
_lowercase : Union[str, Any] = inputs["""input_ids"""]
_lowercase : List[str] = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCamelCase__ ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowercase : str = ["""Summary of the text.""", """Another summary."""]
_lowercase : Optional[int] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_lowercase : Any = tokenizer(UpperCAmelCase_ ,padding=UpperCAmelCase_ )
            _lowercase : str = [[0] * len(x ) for x in encoded_output["""input_ids"""]]
_lowercase : Optional[int] = tokenizer.pad(UpperCAmelCase_ )
self.assertSequenceEqual(outputs["""global_attention_mask"""] ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
pass
def lowerCamelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowercase : int = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Optional[int] = self.tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : Dict = """A, <mask> AllenNLP sentence."""
_lowercase : List[Any] = tokenizer_r.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ )
_lowercase : Any = tokenizer_p.encode_plus(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_token_type_ids=UpperCAmelCase_ )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) ,sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) ,sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) ,)
_lowercase : str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
_lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
UpperCAmelCase_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 336 | 0 |
"""simple docstring"""
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : Dict = len(__UpperCAmelCase )
# We need to create solution object to save path.
_lowercase : int = [[0 for _ in range(__UpperCAmelCase )] for _ in range(__UpperCAmelCase )]
_lowercase : Optional[Any] = run_maze(__UpperCAmelCase , 0 , 0 , __UpperCAmelCase )
if solved:
print("""\n""".join(str(__UpperCAmelCase ) for row in solutions ) )
else:
print("""No solution exists!""" )
return solved
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : List[Any] = len(__UpperCAmelCase )
# Final check point.
if i == j == (size - 1):
_lowercase : Any = 1
return True
_lowercase : Optional[int] = (not i < 0) and (not j < 0) # Check lower bounds
_lowercase : Union[str, Any] = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
_lowercase : str = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
_lowercase : Optional[int] = 1
# check for directions
if (
run_maze(__UpperCAmelCase , i + 1 , __UpperCAmelCase , __UpperCAmelCase )
or run_maze(__UpperCAmelCase , __UpperCAmelCase , j + 1 , __UpperCAmelCase )
or run_maze(__UpperCAmelCase , i - 1 , __UpperCAmelCase , __UpperCAmelCase )
or run_maze(__UpperCAmelCase , __UpperCAmelCase , j - 1 , __UpperCAmelCase )
):
return True
_lowercase : Optional[Any] = 0
return False
return False
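# A tiny worked run (comments only): for the maze [[0, 1], [0, 0]] the
# backtracking visits (0, 0) -> (1, 0) -> (1, 1), so the printed solution
# rows are [1, 0] and [1, 1].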
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361 |
"""simple docstring"""
import argparse
from collections import defaultdict
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : str = F"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : Any = f.readlines()
_lowercase : Optional[int] = F"""class {class_name}("""
_lowercase : List[str] = F"""{4 * " "}def {test_name}("""
_lowercase : List[Any] = F"""{8 * " "}{correct_line.split()[0]}"""
_lowercase : int = F"""{16 * " "}{correct_line.split()[0]}"""
_lowercase : str = False
_lowercase : Optional[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Any = False
_lowercase : int = 0
_lowercase : Tuple = 0
_lowercase : Union[str, Any] = []
for line in lines:
if line.startswith(__UpperCAmelCase ):
_lowercase : List[str] = True
elif in_class and line.startswith(__UpperCAmelCase ):
_lowercase : str = True
elif in_class and in_func and (line.startswith(__UpperCAmelCase ) or line.startswith(__UpperCAmelCase )):
_lowercase : Union[str, Any] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowercase : Optional[int] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowercase : Optional[Any] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"""{spaces * " "}{correct_line}""" )
_lowercase : Union[str, Any] = False
else:
new_lines.append(__UpperCAmelCase )
with open(__UpperCAmelCase , """w""" ) as f:
for line in new_lines:
f.write(__UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase=None ):
if fail is not None:
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : Dict = {l.strip() for l in f.readlines()}
else:
_lowercase : int = None
with open(__UpperCAmelCase , """r""" ) as f:
_lowercase : int = f.readlines()
_lowercase : int = defaultdict(__UpperCAmelCase )
for line in correct_lines:
_lowercase , _lowercase , _lowercase , _lowercase : int = line.split(""";""" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase: List[Any] = argparse.ArgumentParser()
parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""")
parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None)
UpperCAmelCase: Any = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 336 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : Dict = s.rsplit(__UpperCAmelCase , __UpperCAmelCase )
return new.join(__UpperCAmelCase )
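# Right-most replace illustration (comments only): with s = "res_path.w.w",
# old = ".w", new = ".weight" and count = 1 the result is "res_path.w.weight";
# only the last occurrence is rewritten, which is why the .w/.b renames below
# rely on it.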
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : Tuple = {}
_lowercase : Union[str, Any] = ["""group_1""", """group_2""", """group_3""", """group_4"""]
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
_lowercase : Union[str, Any] = key.replace(F"""{group_key}.""" , F"""{group_key}.group.""" )
if "res_path" in key:
_lowercase : Optional[Any] = key.replace("""res_path.""" , """res_path.path.""" )
if key.endswith(""".w""" ):
_lowercase : Optional[int] = rreplace(__UpperCAmelCase , """.w""" , """.weight""" , 1 )
if key.endswith(""".b""" ):
_lowercase : Tuple = rreplace(__UpperCAmelCase , """.b""" , """.bias""" , 1 )
_lowercase : Union[str, Any] = value.float()
return upgrade
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=True ):
from dall_e import Encoder
_lowercase : Optional[Any] = Encoder()
if os.path.exists(__UpperCAmelCase ):
_lowercase : List[str] = torch.load(__UpperCAmelCase )
else:
_lowercase : str = torch.hub.load_state_dict_from_url(__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_lowercase : str = ckpt.state_dict()
encoder.load_state_dict(__UpperCAmelCase )
if config_path is not None:
_lowercase : Dict = FlavaImageCodebookConfig.from_pretrained(__UpperCAmelCase )
else:
_lowercase : Optional[Any] = FlavaImageCodebookConfig()
_lowercase : Union[str, Any] = FlavaImageCodebook(__UpperCAmelCase ).eval()
_lowercase : Dict = encoder.state_dict()
_lowercase : Any = upgrade_state_dict(__UpperCAmelCase )
hf_model.load_state_dict(__UpperCAmelCase )
_lowercase : List[Any] = hf_model.state_dict()
_lowercase : int = count_parameters(__UpperCAmelCase )
_lowercase : List[Any] = count_parameters(__UpperCAmelCase )
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 )
if save_checkpoint:
hf_model.save_pretrained(__UpperCAmelCase )
else:
return hf_state_dict
if __name__ == "__main__":
UpperCAmelCase: str = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
UpperCAmelCase: Optional[Any] = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 362 |
"""simple docstring"""
UpperCAmelCase: List[str] = """0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 336 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class UpperCamelCase ( snake_case ):
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Dict = dataset
_lowercase : Dict = process
_lowercase : Optional[int] = params
def __len__( self ):
return len(self.dataset )
def __getitem__( self ,UpperCAmelCase_ ):
_lowercase : Dict = self.dataset[i]
_lowercase : str = self.process(UpperCAmelCase_ ,**self.params )
return processed
class UpperCamelCase ( snake_case ):
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_=None ):
_lowercase : Optional[Any] = loader
_lowercase : Any = infer
_lowercase : Tuple = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_lowercase : Tuple = None
_lowercase : int = loader_batch_size
# Internal bookkeeping
_lowercase : int = None
_lowercase : Optional[Any] = None
def __len__( self ):
return len(self.loader )
def __iter__( self ):
_lowercase : Optional[Any] = iter(self.loader )
return self
def lowerCamelCase__ ( self ):
if isinstance(self._loader_batch_data ,torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_lowercase : Optional[int] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_lowercase : Dict = {}
for k, element in self._loader_batch_data.items():
if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ):
# Convert ModelOutput to tuple first
_lowercase : int = element.to_tuple()
if isinstance(element[0] ,torch.Tensor ):
_lowercase : Tuple = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] ,np.ndarray ):
_lowercase : Dict = tuple(np.expand_dims(el[self._loader_batch_index] ,0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] ,torch.Tensor ):
_lowercase : List[Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] ,np.ndarray ):
_lowercase : int = tuple(np.expand_dims(el[self._loader_batch_index] ,0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_lowercase : Union[str, Any] = None
elif isinstance(element[self._loader_batch_index] ,torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_lowercase : str = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] ,np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_lowercase : Optional[Any] = np.expand_dims(element[self._loader_batch_index] ,0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_lowercase : int = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_lowercase : int = self._loader_batch_data.__class__(UpperCAmelCase_ )
self._loader_batch_index += 1
return result
def lowerCamelCase__ ( self ):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_lowercase : List[str] = next(self.iterator )
_lowercase : List[str] = self.infer(UpperCAmelCase_ ,**self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(UpperCAmelCase_ ,torch.Tensor ):
_lowercase : int = processed
else:
_lowercase : Optional[Any] = list(processed.keys() )[0]
_lowercase : int = processed[key]
if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : List[Any] = len(UpperCAmelCase_ )
else:
_lowercase : Dict = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_lowercase : Optional[Any] = observed_batch_size
# Setting internal index to unwrap the batch
_lowercase : Union[str, Any] = processed
_lowercase : Union[str, Any] = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
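# Illustrative shape of the unrolling above (not from the original file):
# with loader_batch_size=4 and a model output of shape (4, seq_len), four
# consecutive iterations each slice out one row and re-wrap it with a leading
# batch dimension of 1 before the next batch is pulled from the loader.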
class UpperCamelCase ( snake_case ):
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_=None ):
super().__init__(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
def __iter__( self ):
_lowercase : List[Any] = iter(self.loader )
_lowercase : Any = None
return self
def lowerCamelCase__ ( self ):
if self.subiterator is None:
_lowercase : int = self.infer(next(self.iterator ) ,**self.params )
try:
# Try to return next item
_lowercase : int = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterators and have been iterated against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
_lowercase : Dict = self.infer(next(self.iterator ) ,**self.params )
_lowercase : List[str] = next(self.subiterator )
return processed
class UpperCamelCase ( snake_case ):
"""simple docstring"""
def __iter__( self ):
_lowercase : Any = iter(self.loader )
return self
def lowerCamelCase__ ( self ):
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator`, so we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits `is_last` and then just passes it on to the caller.
_lowercase : int = False
_lowercase : Tuple = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_lowercase : Union[str, Any] = self.loader_batch_item()
_lowercase : Optional[int] = item.pop("""is_last""" )
accumulator.append(UpperCAmelCase_ )
if is_last:
return accumulator
while not is_last:
_lowercase : Optional[int] = self.infer(next(self.iterator ) ,**self.params )
if self.loader_batch_size is not None:
if isinstance(UpperCAmelCase_ ,torch.Tensor ):
_lowercase : Any = processed
else:
_lowercase : int = list(processed.keys() )[0]
_lowercase : str = processed[key]
if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : str = len(UpperCAmelCase_ )
else:
_lowercase : int = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_lowercase : Any = observed_batch_size
_lowercase : List[str] = processed
_lowercase : Union[str, Any] = 0
while self._loader_batch_index < self.loader_batch_size:
_lowercase : Optional[int] = self.loader_batch_item()
_lowercase : List[str] = item.pop("""is_last""" )
accumulator.append(UpperCAmelCase_ )
if is_last:
return accumulator
else:
_lowercase : List[Any] = processed
_lowercase : List[str] = item.pop("""is_last""" )
accumulator.append(UpperCAmelCase_ )
return accumulator
class UpperCamelCase ( snake_case ):
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Optional[int] = dataset
_lowercase : Optional[int] = key
def __len__( self ):
return len(self.dataset )
def __getitem__( self ,UpperCAmelCase_ ):
return self.dataset[i][self.key]
class UpperCamelCase ( snake_case ):
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Optional[Any] = dataset
_lowercase : Optional[int] = keya
_lowercase : Any = keya
def __len__( self ):
return len(self.dataset )
def __getitem__( self ,UpperCAmelCase_ ):
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 363 |
"""simple docstring"""
UpperCAmelCase: str = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
UpperCAmelCase: Any = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCAmelCase: int = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 336 | 0 |
"""simple docstring"""
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
if partitions <= 0:
raise ValueError("""partitions must be a positive number!""" )
if partitions > number_of_bytes:
raise ValueError("""partitions can not > number_of_bytes!""" )
_lowercase : Optional[Any] = number_of_bytes // partitions
_lowercase : List[Any] = []
for i in range(__UpperCAmelCase ):
_lowercase : List[str] = i * bytes_per_partition + 1
_lowercase : List[str] = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(F"""{start_bytes}-{end_bytes}""" )
return allocation_list
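# Example behaviour (comments only): 100 bytes split over 4 partitions gives
# ["1-25", "26-50", "51-75", "76-100"]; the last range absorbs any remainder
# of the integer division.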
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
UpperCAmelCase: List[Any] = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
def constraint_to_multiple_of(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=0 , __UpperCAmelCase=None ):
_lowercase : Union[str, Any] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
_lowercase : str = math.floor(val / multiple ) * multiple
if x < min_val:
_lowercase : Dict = math.ceil(val / multiple ) * multiple
return x
_lowercase : List[str] = (output_size, output_size) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else output_size
_lowercase , _lowercase : List[Any] = get_image_size(__UpperCAmelCase )
_lowercase , _lowercase : Union[str, Any] = output_size
# determine new height and width
_lowercase : str = output_height / input_height
_lowercase : List[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
_lowercase : str = scale_width
else:
# fit height
_lowercase : int = scale_height
_lowercase : List[Any] = constraint_to_multiple_of(scale_height * input_height , multiple=__UpperCAmelCase )
_lowercase : Optional[Any] = constraint_to_multiple_of(scale_width * input_width , multiple=__UpperCAmelCase )
return (new_height, new_width)
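# A worked pass (comments only): for a 480x640 input with output_size
# (384, 384), keep_aspect_ratio=True and multiple=32, the height scale 0.8
# beats the width scale 0.6, so both sides use 0.8 and snap to multiples of
# 32, giving (384, 512).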
class DPTImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image,
        size,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        resample=PILImageResampling.BICUBIC,
        data_format=None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        keep_aspect_ratio=None,
        ensure_multiple_of=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
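

if __name__ == "__main__":
    # Usage sketch (illustrative; the input values are made up for this demo).
    # Note that `preprocess` as written calls `resize` without forwarding
    # `keep_aspect_ratio`/`ensure_multiple_of`, so the output here is square.
    processor = DPTImageProcessor(size={"height": 384, "width": 384})
    dummy_image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
    batch = processor.preprocess(images=dummy_image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 384, 384)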
| 336 | 0 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ):
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
_lowercase : Optional[int] = str(bin(__UpperCAmelCase ) )[2:] # remove the leading "0b"
_lowercase : Any = str(bin(__UpperCAmelCase ) )[2:]
_lowercase : str = max(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) )
return "0b" + "".join(
str(int("""1""" in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(__UpperCAmelCase ) , b_binary.zfill(__UpperCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
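    # Quick check (values chosen for this example): 25 is 0b11001 and 32 is
    # 0b100000, so their OR is 0b111001.
    print(binary_or(25, 32))  # -> 0b111001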
| 365 |
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
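    # Illustrative sketch of both compositions on small relation matrices (the
    # values are chosen here, and this assumes skfuzzy's maxmin_composition /
    # maxprod_composition helpers): maxmin[i, j] = max_k min(R1[i, k], R2[k, j]).
    R1 = np.array([[0.2, 0.8], [0.6, 0.4]])
    R2 = np.array([[0.5, 0.9], [0.3, 0.7]])
    maxmin = fuzz.maxmin_composition(R1, R2)
    maxprod = fuzz.maxprod_composition(R1, R2)
    print(maxmin)
    print(maxprod)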
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 336 | 0 |
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    """simple docstring"""

    model_input_names = ["input_values", "attention_mask"]

    def __init__(
        self,
        feature_size=1,
        sampling_rate=16000,
        padding_value=0.0,
        do_normalize=False,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        frame_signal_scale=1.0,
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        reduction_factor=2,
        return_attention_mask=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        # Derived quantities: window/hop sizes in samples, FFT length, and the
        # slaney-style mel filter bank used for target spectrograms.
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def _extract_mel_features(self, one_waveform):
        # Compute a log10 mel spectrogram: frames of `sample_size` samples with a
        # hop of `sample_stride`, projected onto the mel filter bank (time-major).
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T

    def __call__(
        self,
        audio=None,
        audio_target=None,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ):
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def _process_audio(
        self,
        speech,
        is_target=False,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        **kwargs,
    ):
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self):
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
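

if __name__ == "__main__":
    # Usage sketch (illustrative values; shapes depend on the audio length).
    extractor = SpeechT5FeatureExtractor()
    waveform = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
    inputs = extractor(audio=waveform, sampling_rate=16000, return_tensors="np")
    print(inputs["input_values"].shape)  # (1, 16000) raw waveform batch
    targets = extractor(audio_target=waveform, sampling_rate=16000, return_tensors="np")
    print(targets["input_values"].shape)  # (1, num_frames, 80) log-mel frames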
| 366 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    """simple docstring"""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # Create a list of PIL images from random (3, 30, 400) uint8 arrays.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 336 | 0 |