code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 87–55.2k | int64 0–349 | stringlengths 135–49.1k | int64 0–349 | int64 0–1
"""String case conversion utilities: simple, pascal, camel, snake and kebab case."""
import re


def split_input(str_: str) -> list:
    # Split on every character that is not a letter, digit or whitespace,
    # then break each fragment into words.
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")


if __name__ == "__main__":
    __import__("doctest").testmod()
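# Illustrative calls, computed from the functions above (the wrapper names are
# reconstructed from call sites and separators, so treat them as inferred):
# to_simple_case("one two 31235three4four")        -> "OneTwo31235three4four"
# to_camel_case("one two 31235three4four")         -> "oneTwo31235three4four"
# to_snake_case("one two 31235three4four", True)   -> "ONE_TWO_31235THREE4FOUR"
# to_kebab_case("one two 31235three4four", False)  -> "one-two-31235three4four"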
| code_codestyle: 323 |
"""Manim scene: an input passes through a sharded model while layer weights are
shuttled between CPU and GPU (with disk shown for offload)."""
from manim import *


class Stage5(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        for i, rect in enumerate(model_base):
            # One "checkpoint" square on the model and its mirror on the CPU
            # (blue per the key below).
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)
        self.add(*model_arr, *model_cpu_arr)

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            f"Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1))

        input = Square(0.3)
        input.set_fill(RED, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_1))

        a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)

        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_2 = MarkupText(
            f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}

        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5),
                MoveToTarget(input, run_time=0.5),
                FadeIn(a_c, run_time=0.5),
                lag_ratio=0.2,
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7
                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]),
                        MoveToTarget(model_cpu_arr[i + 1]),
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7),
                        MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )
                self.play(MoveToTarget(model_cpu_arr[i]))

            a = a_c
            a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(step_2),
            FadeOut(a_c, run_time=0.5),
        )

        step_3 = MarkupText(
            f"Inference on a model too large for GPU memory\nis successfully completed.",
            font_size=24,
        )
        step_3.move_to([2, 2, 0])
        self.play(Write(step_3, run_time=3), MoveToTarget(input))
        self.wait()
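# To render the scene above (assuming the manim-community package is installed;
# the class name Stage5 is the one assigned in this restoration):
#   manim -pql <this_file>.py Stage5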
| style_context_codestyle: 323 | label: 1 |
"""Derive short, reversible names for hyperparameter trials from their parameters."""
import copy
import re


class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
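# Illustrative round trip (hypothetical defaults; short names derive from word
# prefixes, so "learning_rate" -> "lr" and "batch_size" -> "bs"):
# TrialShortNamer.set_defaults("hp", {"learning_rate": 0.1, "batch_size": 8})
# TrialShortNamer.shortname({"learning_rate": 0.5, "batch_size": 8})  # -> "hp_lr0.5"
# TrialShortNamer.parse_repr("hp_lr0.5")  # -> {"learning_rate": 0.5, "batch_size": 8}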
| code_codestyle: 323 |
"""Breadth-first search of an undirected graph, with shortest-path retrieval."""
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """The graph is given as a dictionary of adjacency lists; the source
        vertex has to be defined upon initialization."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Run breadth-first search from the source vertex, filling self.parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the path from the source to target_vertex as "A->B->...",
        by walking the parent mapping back from the target."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
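# Expected behavior when run as a script: breadth-first search minimizes edge
# count from source "G", so the prints produce "G->C->A->B->D" and "G"; the
# third call raises ValueError because "Foo" is not a reachable vertex.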
| style_context_codestyle: 323 | label: 1 |
"""Genetic algorithm that evolves a random population of strings toward a target."""
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    # Score is the number of characters already in the right position.
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    # Slice both parents at a random point and swap the tails.
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    # With probability MUTATION_PROBABILITY, replace one random gene of the child.
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
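# Illustrative smaller run (stochastic, so generation/population counts vary):
# generation, population, best = basic("banana", list("abn"), debug=False)
# assert best == "banana"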
| code_codestyle: 323 |
"""A* and bidirectional A* grid pathfinding with selectable heuristic."""
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        # Walk the parent links back to the start node and reverse the result.
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # Each direction aims at the other direction's current frontier node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node)
                        )
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| style_context_codestyle: 323 | label: 1 |
"""Common test mixin for transformers agent tools: validates declared input/output
types and checks that calling the tool produces outputs of the declared types."""
from pathlib import Path
from typing import List

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image


authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")
    return inputs


def output_types(outputs: List):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")
    return output_types


@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_from_inputs(self):
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
        # Should not raise an error
        outputs = self.tool(*_inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
| code_codestyle: 323 |
"""EfficientNet model configuration."""
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
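# A minimal usage sketch (names as defined above; num_hidden_layers is derived
# from the default block repeats: sum([1, 2, 2, 3, 3, 4, 1]) * 4 == 64):
# config = EfficientNetConfig(image_size=224)
# assert config.model_type == "efficientnet"
# assert config.num_hidden_layers == 64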
| style_context_codestyle: 323 | label: 1 |
"""Tests for the Flax Pegasus model: cache usage, JIT encode/decode, and slow
integration checks against the pretrained checkpoints."""
import unittest

from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp
    import numpy as np

    from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel


@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }


@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summarization(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
            " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
            " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
        ]

        tgt_text = [
            "California's largest electricity provider has turned off power to hundreds of thousands of customers.",
            "Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
        ]

        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| code_codestyle: 323 |
"""Image-to-text pipeline: caption images with a vision-to-sequence model,
optionally conditioned on a text prompt."""
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # With no prompt, the git branch of `preprocess` sets `input_ids` to None; batching can
        # then group these into a list of `None` values, which must be collapsed back to None.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
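# A minimal usage sketch via the pipeline factory; the checkpoint is an
# illustrative public captioning model, not something this file mandates:
# from transformers import pipeline
# captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
# captioner("path/or/url/to/image.png")  # -> [{"generated_text": "..."}]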
| style_context_codestyle: 323 | label: 1 |
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Tuple , parent : Any , batch_size : List[Any]=14 , seq_length : Tuple=7 , is_training : Dict=True , use_token_type_ids : Dict=True , use_input_mask : Dict=True , use_labels : int=True , use_mc_token_ids : int=True , vocab_size : Optional[int]=99 , hidden_size : List[str]=32 , num_hidden_layers : Union[str, Any]=5 , num_attention_heads : List[Any]=4 , intermediate_size : Tuple=37 , hidden_act : Dict="gelu" , hidden_dropout_prob : Dict=0.1 , attention_probs_dropout_prob : int=0.1 , max_position_embeddings : Optional[Any]=5_12 , type_vocab_size : Optional[int]=16 , type_sequence_label_size : List[str]=2 , initializer_range : Tuple=0.02 , num_labels : Optional[int]=3 , num_choices : str=4 , scope : Dict=None , ):
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_token_type_ids = use_token_type_ids
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.use_mc_token_ids = use_mc_token_ids
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.pad_token_id = self.vocab_size - 1
def prepare_config_and_inputs( self : Optional[int] ):
'''simple docstring'''
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
mc_token_ids = None
if self.use_mc_token_ids:
mc_token_ids = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = self.get_config()
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def get_config( self : int ):
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def create_and_check_ctrl_model( self : Tuple , config : Union[str, Any] , input_ids : Tuple , input_mask : Tuple , head_mask : int , token_type_ids : Tuple , *lowerCamelCase_ : Tuple ):
'''simple docstring'''
model = CTRLModel(config=config )
model.to(torch_device )
model.eval()
model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
model(input_ids , token_type_ids=token_type_ids )
result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def create_and_check_lm_head_model( self : Optional[Any] , config : str , input_ids : str , input_mask : str , head_mask : Dict , token_type_ids : Union[str, Any] , *lowerCamelCase_ : List[str] ):
'''simple docstring'''
model = CTRLLMHeadModel(config )
model.to(torch_device )
model.eval()
result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def prepare_config_and_inputs_for_common( self : Dict ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
return config, inputs_dict
def create_and_check_ctrl_for_sequence_classification( self : Optional[Any] , config : Optional[int] , input_ids : str , head_mask : str , token_type_ids : str , *lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
config.num_labels = self.num_labels
model = CTRLForSequenceClassification(config )
model.to(torch_device )
model.eval()
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class UpperCamelCase__ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
pipeline_model_mapping = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
def is_pipeline_test_to_skip( self : Any , pipeline_test_casse_name : Optional[int] , config_class : Optional[int] , model_architecture : List[str] , tokenizer_name : int , processor_name : Any ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def setUp( self : Dict ):
'''simple docstring'''
self.model_tester = CTRLModelTester(self )
self.config_tester = ConfigTester(self , config_class=CTRLConfig , n_embd=37 )
def tearDown( self : List[str] ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*config_and_inputs )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = CTRLModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
pass
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def tearDown( self : int ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
model = CTRLLMHeadModel.from_pretrained("""ctrl""" )
model.to(torch_device )
input_ids = torch.tensor(
[[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=torch_device )  # Legal the president is
expected_output_ids = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
output_ids = model.generate(input_ids , do_sample=False )
self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
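# Hedged generation sketch mirroring the integration test above (greedy decoding,
# i.e. do_sample=False). The prompt is the one from the test; the rest is standard
# transformers usage:
#
#   from transformers import CTRLLMHeadModel, CTRLTokenizer
#   tokenizer = CTRLTokenizer.from_pretrained("""ctrl""" )
#   model = CTRLLMHeadModel.from_pretrained("""ctrl""" )
#   input_ids = tokenizer("""Legal the president is""" , return_tensors="""pt""" ).input_ids
#   print(tokenizer.decode(model.generate(input_ids , do_sample=False )[0] ) )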
| 323 |
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( SchedulerCommonTest ):
"""simple docstring"""
scheduler_classes = (CMStochasticIterativeScheduler,)
num_inference_steps = 10
def get_scheduler_config( self : List[str] , **kwargs : int ):
'''simple docstring'''
config = {
"""num_train_timesteps""": 2_01,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
config.update(**kwargs )
return config
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
num_inference_steps = 10
scheduler_config = self.get_scheduler_config()
scheduler = self.scheduler_classes[0](**scheduler_config )
scheduler.set_timesteps(num_inference_steps )
timestep_a = scheduler.timesteps[0]
timestep_b = scheduler.timesteps[1]
sample = self.dummy_sample
residual = 0.1 * sample
output_a = scheduler.step(residual , timestep_a , sample ).prev_sample
output_b = scheduler.step(residual , timestep_b , sample ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_b.shape )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=timesteps )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=clip_denoised )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
num_inference_steps = 1
scheduler.set_timesteps(num_inference_steps )
timesteps = scheduler.timesteps
generator = torch.manual_seed(0 )
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(timesteps ):
# 1. scale model input
scaled_sample = scheduler.scale_model_input(sample , t )
# 2. predict noise residual
model_output = model(scaled_sample , t )
# 3. predict previous sample x_t-1
pred_prev_sample = scheduler.step(model_output , t , sample , generator=generator ).prev_sample
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 192.7_614 ) < 1e-2
assert abs(result_mean.item() - 0.2_510 ) < 1e-3
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
timesteps = [1_06, 0]
scheduler.set_timesteps(timesteps=timesteps )
timesteps = scheduler.timesteps
generator = torch.manual_seed(0 )
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
scaled_sample = scheduler.scale_model_input(sample , t )
# 2. predict noise residual
model_output = model(scaled_sample , t )
# 3. predict previous sample x_t-1
pred_prev_sample = scheduler.step(model_output , t , sample , generator=generator ).prev_sample
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 347.6_357 ) < 1e-2
assert abs(result_mean.item() - 0.4_527 ) < 1e-3
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
timesteps = [39, 30, 12, 15, 0]
with self.assertRaises(ValueError , msg="""`timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=timesteps )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
timesteps = [39, 30, 12, 1, 0]
num_inference_steps = len(timesteps )
with self.assertRaises(ValueError , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
timesteps = [scheduler.config.num_train_timesteps]
with self.assertRaises(
ValueError , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=timesteps )
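# Self-contained sketch of the sampling pattern the tests above exercise: scale
# the model input, predict the residual, then step. A zero tensor stands in for
# a trained consistency model, so the output is meaningless but the API flow is real.
def _sampling_loop_sketch():
    scheduler = CMStochasticIterativeScheduler(num_train_timesteps=2_01 , sigma_min=0.002 , sigma_max=80.0 )
    scheduler.set_timesteps(10 )
    sample = torch.randn(1 , 3 , 32 , 32 ) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        scaled = scheduler.scale_model_input(sample , t )
        model_output = torch.zeros_like(scaled )  # stand-in for model(scaled, t)
        sample = scheduler.step(model_output , t , sample ).prev_sample
    return sample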
| 323 | 1 |
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def __A ( files , tmp_path_factory ):
"""simple docstring"""
dataset_infos_dir = tmp_path_factory.mktemp("""dset_infos_dir""" )
if "full:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""---\ndataset_info:\n dataset_size: 42\n---""" )
if "empty:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""""" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f:
f.write("""{\"default\": {\"dataset_size\": 42}}""" )
dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ),
] , )
def __A ( dataset_info , tmp_path ):
"""simple docstring"""
dataset_info_dir = str(tmp_path )
dataset_info.write_to_directory(dataset_info_dir )
reloaded = DatasetInfo.from_directory(dataset_info_dir )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(dataset_info_dir , """dataset_info.json""" ) )
def __A ( ):
"""simple docstring"""
dataset_info = DatasetInfo(
description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 42}] , download_checksums={} , download_size=13_37 , post_processing_size=4_42 , dataset_size=12_34 , size_in_bytes=13_37 + 4_42 + 12_34 , )
dataset_info_yaml_dict = dataset_info._to_yaml_dict()
assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
reloaded = yaml.safe_load(dataset_info_yaml )
assert dataset_info_yaml_dict == reloaded
def __A ( ):
"""simple docstring"""
dataset_info = DatasetInfo()
dataset_info_yaml_dict = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
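# Compact, hedged round-trip sketch of what the two tests above verify: the YAML
# dict of a DatasetInfo survives a dump/load cycle unchanged.
def test_dataset_info_yaml_round_trip_sketch():
    info = DatasetInfo(description="""demo""" , dataset_size=42 )
    info_dict = info._to_yaml_dict()
    assert yaml.safe_load(yaml.safe_dump(info_dict ) ) == info_dict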
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , )
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=42 ),
"""v2""": DatasetInfo(dataset_size=13_37 ),
} ),
] , )
def __A ( dataset_infos_dict , tmp_path ):
"""simple docstring"""
dataset_infos_dir = str(tmp_path )
dataset_infos_dict.write_to_directory(dataset_infos_dir )
reloaded = DatasetInfosDict.from_directory(dataset_infos_dir )
# the config_name of the dataset_infos_dict takes over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
dataset_info.config_name = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(lowerCamelCase_ , """README.md""" ) )
| 323 |
'''simple docstring'''
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class UpperCamelCase__ ( AbstractDatasetReader ):
"""simple docstring"""
def __init__( self : Dict , path_or_paths : NestedDataStructureLike[PathLike] , split : Optional[NamedSplit] = None , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , streaming : bool = False , num_proc : Optional[int] = None , **kwargs : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
self.builder = Text(
cache_dir=cache_dir , data_files=path_or_paths , features=features , **kwargs , )
def read( self : Dict ):
'''simple docstring'''
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
dataset = self.builder.as_dataset(
split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
return dataset
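# The reader above is what backs `load_dataset("text", ...)`. Hedged usage
# sketch (the file path is illustrative):
#
#   from datasets import load_dataset
#   ds = load_dataset("""text""" , data_files={"""train""": """my_corpus.txt"""} , split="""train""" )
#   ds[0]  # {"text": "<first line of my_corpus.txt>"}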
| 323 | 1 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester :
"""simple docstring"""
def __init__( self : List[str] , parent : int , batch_size : Any=13 , image_size : Optional[Any]=30 , patch_size : Tuple=2 , num_channels : Tuple=3 , is_training : int=True , use_labels : List[str]=True , hidden_size : Tuple=32 , num_hidden_layers : Any=5 , num_attention_heads : Union[str, Any]=4 , intermediate_size : Optional[Any]=37 , hidden_act : Optional[int]="gelu" , hidden_dropout_prob : Optional[Any]=0.1 , attention_probs_dropout_prob : List[Any]=0.1 , type_sequence_label_size : Optional[int]=10 , initializer_range : Union[str, Any]=0.02 , num_labels : Any=3 , mask_ratio : Dict=0.6 , scope : Any=None , ):
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.mask_ratio = mask_ratio
self.scope = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
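# Worked example with the defaults above (image_size=30, patch_size=2,
# mask_ratio=0.6): num_patches = (30 // 2) ** 2 = 225 and
# seq_length = ceil(0.4 * (225 + 1)) = 91.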
def prepare_config_and_inputs( self : Optional[int] ):
'''simple docstring'''
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
config = self.get_config()
return config, pixel_values, labels
def get_config( self : Dict ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def create_and_check_model( self : int , config : int , pixel_values : int , labels : Tuple ):
'''simple docstring'''
model = ViTMAEModel(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_pretraining( self : Dict , config : Union[str, Any] , pixel_values : Union[str, Any] , labels : Optional[int] ):
'''simple docstring'''
model = ViTMAEForPreTraining(config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
num_patches = (self.image_size // self.patch_size) ** 2
expected_num_channels = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
config.num_channels = 1
model = ViTMAEForPreTraining(config )
model.to(torch_device )
model.eval()
pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
result = model(pixel_values )
expected_num_channels = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def prepare_config_and_inputs_for_common( self : List[str] ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
pipeline_model_mapping = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
def setUp( self : Tuple ):
'''simple docstring'''
self.model_tester = ViTMAEModelTester(self )
self.config_tester = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=37 )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , expected_arg_names )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
def check_pt_tf_models( self : List[str] , tf_model : Tuple , pt_model : Optional[Any] , pt_inputs_dict : int ):
'''simple docstring'''
np.random.seed(2 )
num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
pt_noise = torch.from_numpy(noise )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
pt_inputs_dict["""noise"""] = pt_noise
super().check_pt_tf_models(tf_model , pt_model , pt_inputs_dict )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
model.to(torch_device )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
out_a = outputs[0].cpu().numpy()
out_a[np.isnan(out_a )] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname )
model = model_class.from_pretrained(tmpdirname )
model.to(torch_device )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
after_outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
# Make sure we don't have nans
out_b = after_outputs[0].cpu().numpy()
out_b[np.isnan(out_b )] = 0
max_diff = np.amax(np.abs(out_b - out_a ) )
self.assertLessEqual(max_diff , 1e-5 )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@slow
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ViTMAEModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def prepare_img( ):
"""simple docstring"""
image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def default_image_processor( self : Dict ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
np.random.seed(2 )
model = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(torch_device )
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
vit_mae_config = ViTMAEConfig()
num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
noise = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
outputs = model(**inputs , noise=torch.from_numpy(noise ).to(device=torch_device ) )
# verify the logits
expected_shape = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = torch.tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(torch_device ) , atol=1e-4 ) )
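# Hedged sketch of the determinism trick above: ViTMAE masks patches randomly,
# so the tests pin the mask by passing an explicit `noise` tensor. The numbers
# below assume the standard ViT-MAE base geometry (224px images, 16px patches):
#
#   num_patches = (2_24 // 16) ** 2  # 196
#   noise = torch.from_numpy(np.random.uniform(size=(1, num_patches) ) )
#   outputs = model(**inputs , noise=noise.to(torch_device ) )  # same noise -> same mask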
| 323 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config( model_name ):
"""simple docstring"""
auxiliary_in_channels = 3_84
window_size = 7
if "tiny" in model_name:
embed_dim = 96
depths = (2, 2, 6, 2)
num_heads = (3, 6, 12, 24)
elif "small" in model_name:
embed_dim = 96
depths = (2, 2, 18, 2)
num_heads = (3, 6, 12, 24)
elif "base" in model_name:
embed_dim = 1_28
depths = (2, 2, 18, 2)
num_heads = (4, 8, 16, 32)
window_size = 12
auxiliary_in_channels = 5_12
elif "large" in model_name:
embed_dim = 1_92
depths = (2, 2, 18, 2)
num_heads = (6, 12, 24, 48)
window_size = 12
auxiliary_in_channels = 7_68
# set label information
num_labels = 1_50
repo_id = """huggingface/label-files"""
filename = """ade20k-id2label.json"""
id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
id2label = {int(k ): v for k, v in id2label.items()}
label2id = {v: k for k, v in id2label.items()}
backbone_config = SwinConfig(
embed_dim=embed_dim , depths=depths , num_heads=num_heads , window_size=window_size , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
config = UperNetConfig(
backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )
return config
def create_rename_keys( config ):
"""simple docstring"""
rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
"""simple docstring"""
val = dct.pop(old )
dct[new] = val
def read_in_q_k_v( state_dict , backbone_config ):
"""simple docstring"""
num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
dim = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
in_proj_weight = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
in_proj_bias = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[:dim]
state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[dim : dim * 2, :]
state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[dim : dim * 2]
state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[-dim :, :]
state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim :]
# fmt: on
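# Tiny numeric check of the fused-qkv split performed above (hedged sketch on a
# toy tensor): the three (dim, dim) blocks concatenate back to the fused matrix.
def _qkv_split_demo(dim=4 ):
    qkv = torch.arange(3 * dim * dim , dtype=torch.float32 ).reshape(3 * dim , dim )
    q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
    assert torch.equal(torch.cat([q, k, v] , dim=0 ) , qkv )
    return q, k, v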
def correct_unfold_reduction_order( x ):
"""simple docstring"""
out_channel, in_channel = x.shape
x = x.reshape(out_channel , 4 , in_channel // 4 )
x = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(out_channel , in_channel )
return x
def reverse_correct_unfold_reduction_order( x ):
"""simple docstring"""
out_channel, in_channel = x.shape
x = x.reshape(out_channel , in_channel // 4 , 4 )
x = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(out_channel , in_channel )
return x
def correct_unfold_norm_order( x ):
"""simple docstring"""
in_channel = x.shape[0]
x = x.reshape(4 , in_channel // 4 )
x = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(in_channel )
return x
def reverse_correct_unfold_norm_order( x ):
"""simple docstring"""
in_channel = x.shape[0]
x = x.reshape(in_channel // 4 , 4 )
x = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(in_channel )
return x
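# Small, hedged demo of the permutation above on an 8-element vector:
def _unfold_norm_order_demo():
    # arange(8) is mapped to tensor([0., 4., 2., 6., 1., 5., 3., 7.]),
    # i.e. reshape(2, 4), pick columns [0, 2, 1, 3], transpose, flatten.
    return reverse_correct_unfold_norm_order(torch.arange(8.0 ) )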
def convert_upernet_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub ):
"""simple docstring"""
model_name_to_url = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
checkpoint_url = model_name_to_url[model_name]
state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" , file_name=model_name )[
"""state_dict"""
]
for name, param in state_dict.items():
print(name , param.shape )
config = get_upernet_config(model_name )
model = UperNetForSemanticSegmentation(config )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
val = state_dict.pop(key )
if "bn" in key:
key = key.replace("""bn""" , """batch_norm""" )
state_dict[key] = val
# rename keys
rename_keys = create_rename_keys(config )
for src, dest in rename_keys:
rename_key(state_dict , src , dest )
read_in_q_k_v(state_dict , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
state_dict[key] = reverse_correct_unfold_reduction_order(value )
if "norm" in key:
state_dict[key] = reverse_correct_unfold_norm_order(value )
model.load_state_dict(state_dict )
# verify on image
SCREAMING_SNAKE_CASE : Optional[int] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("""RGB""" )
SCREAMING_SNAKE_CASE : Optional[int] = SegformerImageProcessor()
SCREAMING_SNAKE_CASE : str = processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = outputs.logits
print(logits.shape )
print("""First values of logits:""" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
expected_slice = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] )
elif model_name == "upernet-swin-small":
expected_slice = torch.tensor(
[[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] )
elif model_name == "upernet-swin-base":
expected_slice = torch.tensor(
[[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] )
elif model_name == "upernet-swin-large":
expected_slice = torch.tensor(
[[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(pytorch_dump_folder_path )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCAmelCase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
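# Hedged CLI usage (the script filename is an assumption based on the entry point above):
#
#   python convert_upernet_checkpoint.py --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny --push_to_hub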
| 323 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
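# Sketch of the laziness the mapping above provides (hedged; the timing claim is
# illustrative, not measured):
#
#   import transformers.models.electra as electra   # cheap: no torch/TF/Flax import yet
#   electra.ElectraModel                             # first access triggers the torch-backed import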
| 323 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class UpperCamelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Dict , vocab : Dict[str, int] , merges : List[str] , max_length : int = None , pad_token_id : int = None ):
'''simple docstring'''
super().__init__()
self.pad_token_id = pad_token_id
self.max_length = max_length
self.vocab = vocab
self.merges = merges
self.tf_tokenizer = BytePairTokenizer(vocab , merges , sequence_length=max_length )
@classmethod
def from_tokenizer( cls : Any , tokenizer : GPTaTokenizer , *args : str , **kwargs : str ):
'''simple docstring'''
merges = [""" """.join(m ) for m in tokenizer.bpe_ranks.keys()]
vocab = tokenizer.get_vocab()
return cls(vocab , merges , *args , **kwargs )
@classmethod
def from_pretrained( cls : List[Any] , pretrained_model_name_or_path : Union[str, os.PathLike] , *init_inputs : str , **kwargs : Union[str, Any] ):
'''simple docstring'''
tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path , *init_inputs , **kwargs )
return cls.from_tokenizer(tokenizer , *init_inputs , **kwargs )
@classmethod
def from_config( cls : List[str] , config : Tuple ):
'''simple docstring'''
return cls(**config )
def get_config( self : Dict ):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def call( self : Union[str, Any] , x : Dict , max_length : int = None ):
'''simple docstring'''
input_ids = self.tf_tokenizer(x )
attention_mask = tf.ones_like(input_ids )
if self.pad_token_id is not None:
# pad the tokens up to max length
max_length = max_length if max_length is not None else self.max_length
if max_length is not None:
input_ids, attention_mask = pad_model_inputs(
input_ids , max_seq_length=max_length , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 323 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {
"""configuration_xlm_roberta""": [
"""XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaConfig""",
"""XLMRobertaOnnxConfig""",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["""XLMRobertaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["""XLMRobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaForCausalLM""",
"""XLMRobertaForMaskedLM""",
"""XLMRobertaForMultipleChoice""",
"""XLMRobertaForQuestionAnswering""",
"""XLMRobertaForSequenceClassification""",
"""XLMRobertaForTokenClassification""",
"""XLMRobertaModel""",
"""XLMRobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMRobertaForCausalLM""",
"""TFXLMRobertaForMaskedLM""",
"""TFXLMRobertaForMultipleChoice""",
"""TFXLMRobertaForQuestionAnswering""",
"""TFXLMRobertaForSequenceClassification""",
"""TFXLMRobertaForTokenClassification""",
"""TFXLMRobertaModel""",
"""TFXLMRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxXLMRobertaForMaskedLM""",
"""FlaxXLMRobertaForCausalLM""",
"""FlaxXLMRobertaForMultipleChoice""",
"""FlaxXLMRobertaForQuestionAnswering""",
"""FlaxXLMRobertaForSequenceClassification""",
"""FlaxXLMRobertaForTokenClassification""",
"""FlaxXLMRobertaModel""",
"""FlaxXLMRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
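
# --- illustrative sketch (added for clarity; not part of the original file) ---
# `_LazyModule` above defers the heavy framework imports until a symbol is
# first accessed. A stripped-down version of the idea, standard library only
# (names here are mine, not transformers'):
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, name):
        if name not in self._symbol_to_module:
            raise AttributeError(name)
        submodule = importlib.import_module("." + self._symbol_to_module[name], self.__name__)
        return getattr(submodule, name)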
| 323 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase_ : Optional[NestedDataStructureLike[PathLike]] = None , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Optional[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = path_or_paths
SCREAMING_SNAKE_CASE : List[Any] = split if split or isinstance(lowerCamelCase_ , lowerCamelCase_ ) else """train"""
SCREAMING_SNAKE_CASE : List[Any] = features
SCREAMING_SNAKE_CASE : Union[str, Any] = cache_dir
SCREAMING_SNAKE_CASE : Dict = keep_in_memory
SCREAMING_SNAKE_CASE : Union[str, Any] = streaming
SCREAMING_SNAKE_CASE : Optional[int] = num_proc
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs
@abstractmethod
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
pass
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : str , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Tuple , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = features
SCREAMING_SNAKE_CASE : int = cache_dir
SCREAMING_SNAKE_CASE : Dict = keep_in_memory
SCREAMING_SNAKE_CASE : Tuple = streaming
SCREAMING_SNAKE_CASE : Union[str, Any] = num_proc
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs
@abstractmethod
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
pass
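
# --- illustrative sketch (added for clarity; not part of the original file) ---
# How a concrete reader plugs into the abstract bases above (names are mine,
# not the library's): the base stores shared options, the subclass implements
# the abstract read hook and returns the loaded data.
class _BaseReaderSketch(ABC):
    def __init__(self, cache_dir=None, streaming=False, **kwargs):
        self.cache_dir = cache_dir
        self.streaming = streaming
        self.kwargs = kwargs

    @abstractmethod
    def read(self):
        ...

class _ListReaderSketch(_BaseReaderSketch):
    def __init__(self, rows, **kwargs):
        super().__init__(**kwargs)
        self.rows = rows

    def read(self):
        return list(self.rows)

# _ListReaderSketch([{"text": "a"}]).read() -> [{"text": "a"}]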
| 323 | 1 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
"""simple docstring"""
def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , rotary_dim=4 , num_hidden_layers=4 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , initializer_range=0.02 , ):
    '''simple docstring'''
    self.parent = parent
    self.batch_size = batch_size
    self.seq_length = seq_length
    self.is_training = is_training
    self.use_input_mask = use_input_mask
    self.use_token_type_ids = use_token_type_ids
    self.use_labels = use_labels
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.rotary_dim = rotary_dim
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.initializer_range = initializer_range
    self.scope = None
    self.bos_token_id = vocab_size - 1
    self.eos_token_id = vocab_size - 1
    self.pad_token_id = vocab_size - 1
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : List[str] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = config_and_inputs
SCREAMING_SNAKE_CASE : Tuple = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = 20
SCREAMING_SNAKE_CASE : Any = model_class_name(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model.init_cache(input_ids.shape[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
SCREAMING_SNAKE_CASE : Optional[int] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE : Any = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE : str = model(
input_ids[:, -1:] , attention_mask=lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = 20
SCREAMING_SNAKE_CASE : Dict = model_class_name(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
SCREAMING_SNAKE_CASE : str = model.init_cache(input_ids.shape[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE : Any = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE : Dict = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class UpperCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
SCREAMING_SNAKE_CASE__ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxGPTJModelTester(self )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
@tooslow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = GPT2Tokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : Optional[Any] = model.config.eos_token_id
SCREAMING_SNAKE_CASE : str = jax.jit(model.generate )
SCREAMING_SNAKE_CASE : str = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@is_pt_flax_cross_test
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Optional[int] = 1
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
SCREAMING_SNAKE_CASE : Optional[int] = pt_model_class(lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ , dtype=jnp.float32 )
SCREAMING_SNAKE_CASE : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = fx_state
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = pt_model(**lowerCamelCase_ ).to_tuple()
SCREAMING_SNAKE_CASE : Any = fx_model(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model_class.from_pretrained(lowerCamelCase_ , from_pt=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = fx_model_loaded(**lowerCamelCase_ ).to_tuple()
self.assertEqual(
len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = pt_model_class(lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ , dtype=jnp.float32 )
SCREAMING_SNAKE_CASE : List[Any] = load_flax_weights_in_pytorch_model(lowerCamelCase_ , fx_model.params )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
SCREAMING_SNAKE_CASE : Dict = 1
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : Tuple = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = pt_model(**lowerCamelCase_ ).to_tuple()
SCREAMING_SNAKE_CASE : Optional[Any] = fx_model(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = pt_model_class.from_pretrained(lowerCamelCase_ , from_flax=lowerCamelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = pt_model_loaded(**lowerCamelCase_ ).to_tuple()
self.assertEqual(
len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
SCREAMING_SNAKE_CASE : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase_ )
| 323 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = """ylacombe/bark-small"""
SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : str = """en_speaker_1"""
SCREAMING_SNAKE_CASE : Optional[int] = """This is a test string"""
SCREAMING_SNAKE_CASE : Optional[int] = """speaker_embeddings_path.json"""
SCREAMING_SNAKE_CASE : List[Any] = """speaker_embeddings"""
def lowerCamelCase_ ( self : int , **lowerCamelCase_ : int ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[str] = BarkProcessor(tokenizer=lowerCamelCase_ )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
SCREAMING_SNAKE_CASE : int = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
SCREAMING_SNAKE_CASE : List[str] = 35
SCREAMING_SNAKE_CASE : List[Any] = 2
SCREAMING_SNAKE_CASE : int = 8
SCREAMING_SNAKE_CASE : Optional[int] = {
"""semantic_prompt""": np.ones(lowerCamelCase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
SCREAMING_SNAKE_CASE : Tuple = processor(text=self.input_string , voice_preset=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = processor(text=self.input_string , voice_preset=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
SCREAMING_SNAKE_CASE : Optional[Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Any = BarkProcessor(tokenizer=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string )
SCREAMING_SNAKE_CASE : Tuple = tokenizer(
self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
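
# --- illustrative sketch (added for clarity; not part of the original file) ---
# The voice-preset test above round-trips a dict of arrays through an .npz
# file; the same round trip in isolation (keys and shapes mirror the dummy
# preset built in the test):
def _npz_roundtrip_demo():
    preset = {"semantic_prompt": np.ones(35), "coarse_prompt": np.ones((2, 35))}
    path = os.path.join(tempfile.mkdtemp(), "file.npz")
    np.savez(path, **preset)
    loaded = np.load(path)
    assert set(loaded.files) == set(preset)
    assert np.array_equal(loaded["coarse_prompt"], preset["coarse_prompt"])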
| 323 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {"""configuration_opt""": ["""OPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OPTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_opt"""] = [
"""OPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OPTForCausalLM""",
"""OPTModel""",
"""OPTPreTrainedModel""",
"""OPTForSequenceClassification""",
"""OPTForQuestionAnswering""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["""TFOPTForCausalLM""", """TFOPTModel""", """TFOPTPreTrainedModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_flax_opt"""] = [
"""FlaxOPTForCausalLM""",
"""FlaxOPTModel""",
"""FlaxOPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 323 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy( preds , labels ):
"""simple docstring"""
return (preds == labels).mean()
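
# --- illustrative sketch (added for clarity; not part of the original file) ---
# Quick sanity check for the accuracy helper above: two of three predictions
# match, so the mean is 2/3.
def _simple_accuracy_demo():
    preds = np.array([1, 0, 1])
    labels = np.array([1, 1, 1])
    assert simple_accuracy(preds, labels) == 2 / 3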
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def __A ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase_ )
# Set seed
set_seed(training_args.seed )
try:
SCREAMING_SNAKE_CASE : Dict = processors[data_args.task_name]()
SCREAMING_SNAKE_CASE : Optional[int] = processor.get_labels()
SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , )
# Get datasets
SCREAMING_SNAKE_CASE : Optional[Any] = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
SCREAMING_SNAKE_CASE : Dict = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(p ) -> Dict:
    preds = np.argmax(p.predictions , axis=1 )
    return {"acc": simple_accuracy(preds , p.label_ids )}
# Data collator
SCREAMING_SNAKE_CASE : List[Any] = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
SCREAMING_SNAKE_CASE : Any = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , compute_metrics=lowerCamelCase_ , data_collator=lowerCamelCase_ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE : Optional[Any] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
SCREAMING_SNAKE_CASE : Optional[Any] = trainer.evaluate()
SCREAMING_SNAKE_CASE : str = os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_master():
with open(lowerCamelCase_ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , lowerCamelCase_ , lowerCamelCase_ )
writer.write("""%s = %s\n""" % (key, value) )
results.update(lowerCamelCase_ )
return results
def __A ( lowerCamelCase_ ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
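
# --- illustrative usage (added for clarity; not part of the original file) ---
# A typical invocation of this script; the paths, model and task name below
# are placeholders, not values taken from the original source:
#
#   python run_multiple_choice.py \
#       --model_name_or_path bert-base-uncased \
#       --task_name swag \
#       --data_dir ./data/swag \
#       --output_dir ./out \
#       --max_seq_length 128 \
#       --do_train --do_eval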
| 323 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = TFXLMRobertaModel.from_pretrained("""jplu/tf-xlm-roberta-base""" )
SCREAMING_SNAKE_CASE : str = {
"""input_ids""": tf.convert_to_tensor([[0, 26_46, 1_02_69, 83, 9_99_42, 2]] , dtype=tf.intaa ), # "My dog is cute"
"""attention_mask""": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ )["""last_hidden_state"""]
SCREAMING_SNAKE_CASE : Optional[Any] = tf.TensorShape((1, 6, 7_68) )
self.assertEqual(output.shape , lowerCamelCase_ )
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE : int = tf.convert_to_tensor(
[
[
[0.0_681_762, 0.10_894_451, 0.06_772_504],
[-0.06_423_668, 0.02_366_615, 0.04_329_344],
[-0.06_057_295, 0.09_974_135, -0.00_070_584],
]
] , dtype=tf.float32 , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 323 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Any=("DownEncoderBlock2D",) , lowerCamelCase_ : List[Any]=(64,) , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : List[Any]="silu" , lowerCamelCase_ : Optional[int]=True , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[int] = layers_per_block
SCREAMING_SNAKE_CASE : int = torch.nn.Conv2d(
lowerCamelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList([] )
# down
SCREAMING_SNAKE_CASE : Tuple = block_out_channels[0]
for i, down_block_type in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Any = output_channel
SCREAMING_SNAKE_CASE : List[str] = block_out_channels[i]
SCREAMING_SNAKE_CASE : Union[str, Any] = i == len(lowerCamelCase_ ) - 1
SCREAMING_SNAKE_CASE : Optional[Any] = get_down_block(
lowerCamelCase_ , num_layers=self.layers_per_block , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
self.down_blocks.append(lowerCamelCase_ )
# mid
SCREAMING_SNAKE_CASE : Union[str, Any] = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# out
SCREAMING_SNAKE_CASE : List[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCamelCase_ , eps=1e-6 )
SCREAMING_SNAKE_CASE : List[Any] = nn.SiLU()
SCREAMING_SNAKE_CASE : Dict = 2 * out_channels if double_z else out_channels
SCREAMING_SNAKE_CASE : List[Any] = nn.Conv2d(block_out_channels[-1] , lowerCamelCase_ , 3 , padding=1 )
SCREAMING_SNAKE_CASE : Tuple = False
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = x
SCREAMING_SNAKE_CASE : int = self.conv_in(lowerCamelCase_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase_ : List[Any] ):
def custom_forward(*lowerCamelCase_ : List[str] ):
return module(*lowerCamelCase_ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
else:
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCamelCase_ )
else:
# down
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : Tuple = down_block(lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : List[Any] = self.mid_block(lowerCamelCase_ )
# post-process
SCREAMING_SNAKE_CASE : Optional[Any] = self.conv_norm_out(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.conv_act(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.conv_out(lowerCamelCase_ )
return sample
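
# --- illustrative sketch (added for clarity; not part of the original file) ---
# The encoder above wraps each block in a closure before handing it to
# torch.utils.checkpoint, so activations are recomputed during backward
# instead of stored. The same pattern in isolation (torch >= 1.11 for the
# `use_reentrant` flag):
def _checkpoint_demo():
    def create_custom_forward(module):
        def custom_forward(*inputs):
            return module(*inputs)
        return custom_forward

    layer = nn.Linear(4, 4)
    x = torch.randn(2, 4, requires_grad=True)
    return torch.utils.checkpoint.checkpoint(create_custom_forward(layer), x, use_reentrant=False)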
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : str=("UpDecoderBlock2D",) , lowerCamelCase_ : Union[str, Any]=(64,) , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : Dict="silu" , lowerCamelCase_ : Any="group" , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : int = layers_per_block
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Conv2d(
lowerCamelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Any = nn.ModuleList([] )
SCREAMING_SNAKE_CASE : str = in_channels if norm_type == """spatial""" else None
# mid
SCREAMING_SNAKE_CASE : Dict = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# up
SCREAMING_SNAKE_CASE : Union[str, Any] = list(reversed(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Any = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = output_channel
SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i]
SCREAMING_SNAKE_CASE : List[str] = i == len(lowerCamelCase_ ) - 1
SCREAMING_SNAKE_CASE : List[Any] = get_up_block(
lowerCamelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , prev_output_channel=lowerCamelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , resnet_time_scale_shift=lowerCamelCase_ , )
self.up_blocks.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = output_channel
# out
if norm_type == "spatial":
SCREAMING_SNAKE_CASE : List[Any] = SpatialNorm(block_out_channels[0] , lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCamelCase_ , eps=1e-6 )
SCREAMING_SNAKE_CASE : Dict = nn.SiLU()
SCREAMING_SNAKE_CASE : str = nn.Conv2d(block_out_channels[0] , lowerCamelCase_ , 3 , padding=1 )
SCREAMING_SNAKE_CASE : Dict = False
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : str=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = z
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_in(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase_ : List[str] ):
def custom_forward(*lowerCamelCase_ : str ):
return module(*lowerCamelCase_ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
SCREAMING_SNAKE_CASE : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
else:
# middle
SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ )
else:
# middle
SCREAMING_SNAKE_CASE : Any = self.mid_block(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Any = up_block(lowerCamelCase_ , lowerCamelCase_ )
# post-process
if latent_embeds is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_norm_out(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_act(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.conv_out(lowerCamelCase_ )
return sample
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int=None , lowerCamelCase_ : Any="random" , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[Any]=True ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Tuple = n_e
SCREAMING_SNAKE_CASE : int = vq_embed_dim
SCREAMING_SNAKE_CASE : Tuple = beta
SCREAMING_SNAKE_CASE : Union[str, Any] = legacy
SCREAMING_SNAKE_CASE : int = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
SCREAMING_SNAKE_CASE : Optional[Any] = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
SCREAMING_SNAKE_CASE : Tuple = self.used.shape[0]
SCREAMING_SNAKE_CASE : Any = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
SCREAMING_SNAKE_CASE : Union[str, Any] = self.re_embed
SCREAMING_SNAKE_CASE : Any = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
SCREAMING_SNAKE_CASE : Optional[int] = n_e
SCREAMING_SNAKE_CASE : Any = sane_index_shape
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = inds.shape
assert len(lowerCamelCase_ ) > 1
SCREAMING_SNAKE_CASE : Tuple = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long()
SCREAMING_SNAKE_CASE : Union[str, Any] = match.argmax(-1 )
SCREAMING_SNAKE_CASE : Tuple = match.sum(2 ) < 1
if self.unknown_index == "random":
SCREAMING_SNAKE_CASE : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
SCREAMING_SNAKE_CASE : Any = self.unknown_index
return new.reshape(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = inds.shape
assert len(lowerCamelCase_ ) > 1
SCREAMING_SNAKE_CASE : str = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ )
if self.re_embed > self.used.shape[0]: # extra token
SCREAMING_SNAKE_CASE : List[Any] = 0 # simply set to zero
SCREAMING_SNAKE_CASE : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCamelCase_ )
return back.reshape(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous()
SCREAMING_SNAKE_CASE : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
SCREAMING_SNAKE_CASE : Any = torch.argmin(torch.cdist(lowerCamelCase_ , self.embedding.weight ) , dim=1 )
SCREAMING_SNAKE_CASE : Tuple = self.embedding(lowerCamelCase_ ).view(z.shape )
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : List[str] = None
# compute loss for embedding
if not self.legacy:
SCREAMING_SNAKE_CASE : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
SCREAMING_SNAKE_CASE : Tuple = z + (z_q - z).detach()
# reshape back to match original input shape
SCREAMING_SNAKE_CASE : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE : List[Any] = self.remap_to_used(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
if self.remap is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE : List[Any] = self.unmap_to_all(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
SCREAMING_SNAKE_CASE : str = self.embedding(lowerCamelCase_ )
if shape is not None:
SCREAMING_SNAKE_CASE : List[str] = z_q.view(lowerCamelCase_ )
# reshape back to match original input shape
SCREAMING_SNAKE_CASE : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
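
# --- illustrative sketch (added for clarity; not part of the original file) ---
# The core of the quantizer above, reduced to flat tensors: nearest-codebook
# lookup plus the straight-through gradient trick `z + (z_q - z).detach()`.
def _vq_demo(z_flat: torch.Tensor, codebook: torch.Tensor):
    # z_flat: [N, D], codebook: [K, D]
    indices = torch.argmin(torch.cdist(z_flat, codebook), dim=1)
    z_q = codebook[indices]
    # forward uses the quantized values, backward passes gradients straight to z
    z_q = z_flat + (z_q - z_flat).detach()
    return z_q, indices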
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int]=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parameters
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = torch.chunk(lowerCamelCase_ , 2 , dim=1 )
SCREAMING_SNAKE_CASE : List[str] = torch.clamp(self.logvar , -30.0 , 20.0 )
SCREAMING_SNAKE_CASE : Dict = deterministic
SCREAMING_SNAKE_CASE : int = torch.exp(0.5 * self.logvar )
SCREAMING_SNAKE_CASE : Tuple = torch.exp(self.logvar )
if self.deterministic:
SCREAMING_SNAKE_CASE : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[torch.Generator] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = randn_tensor(
self.mean.shape , generator=lowerCamelCase_ , device=self.parameters.device , dtype=self.parameters.dtype )
SCREAMING_SNAKE_CASE : Optional[Any] = self.mean + self.std * sample
return x
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int=None ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=[1, 2, 3] ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
SCREAMING_SNAKE_CASE : List[Any] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.mean
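
# --- illustrative sketch (added for clarity; not part of the original file) ---
# Sanity check for the closed-form KL against the standard normal used in the
# distribution above: 0.5 * sum(mean^2 + var - 1 - logvar).
def _kl_demo():
    from torch.distributions import Normal, kl_divergence

    mean = torch.randn(4)
    logvar = torch.randn(4)
    closed_form = 0.5 * (mean**2 + logvar.exp() - 1.0 - logvar)
    reference = kl_divergence(Normal(mean, (0.5 * logvar).exp()), Normal(0.0, 1.0))
    assert torch.allclose(closed_form, reference, atol=1e-5)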
| 323 | 1 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__UpperCAmelCase = False
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = """A painting of a squirrel eating a burger """
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = pipe(
prompt=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = VersatileDiffusionTextToImagePipeline.from_pretrained(lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = generator.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = pipe(
prompt=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = VersatileDiffusionTextToImagePipeline.from_pretrained(
"""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = """A painting of a squirrel eating a burger """
SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = pipe(
prompt=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
SCREAMING_SNAKE_CASE : Dict = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE : List[str] = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 323 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaxAutoencoderKL
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : str = 3
SCREAMING_SNAKE_CASE : List[Any] = (32, 32)
SCREAMING_SNAKE_CASE : Tuple = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE : Any = jax.random.uniform(lowerCamelCase_ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_input
return init_dict, inputs_dict
| 323 | 1 |
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__ ( lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [R'''h\.\d+\.attn\.bias''', R'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : int = 5_02_57 , lowerCamelCase_ : int = 10_24 , lowerCamelCase_ : int = 7_68 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : str = "gelu_new" , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 1e-5 , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[int] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'''
f''' `n_embd`: {n_embd} are not equal.''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = prefix_inner_dim
SCREAMING_SNAKE_CASE : List[str] = prefix_hidden_dim
SCREAMING_SNAKE_CASE : Tuple = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
SCREAMING_SNAKE_CASE : str = (
nn.Linear(self.prefix_hidden_dim , lowerCamelCase_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
SCREAMING_SNAKE_CASE : Any = GPT2Config(
vocab_size=lowerCamelCase_ , n_positions=lowerCamelCase_ , n_embd=lowerCamelCase_ , n_layer=lowerCamelCase_ , n_head=lowerCamelCase_ , n_inner=lowerCamelCase_ , activation_function=lowerCamelCase_ , resid_pdrop=lowerCamelCase_ , embd_pdrop=lowerCamelCase_ , attn_pdrop=lowerCamelCase_ , layer_norm_epsilon=lowerCamelCase_ , initializer_range=lowerCamelCase_ , scale_attn_weights=lowerCamelCase_ , use_cache=lowerCamelCase_ , scale_attn_by_inverse_layer_idx=lowerCamelCase_ , reorder_and_upcast_attn=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = GPT2LMHeadModel(lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : Optional[torch.Tensor] = None , lowerCamelCase_ : Optional[torch.Tensor] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.transformer.transformer.wte(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.encode_prefix(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.decode_prefix(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
SCREAMING_SNAKE_CASE : Dict = torch.cat((dummy_token, input_ids) , dim=1 )
SCREAMING_SNAKE_CASE : str = self.transformer(inputs_embeds=lowerCamelCase_ , labels=lowerCamelCase_ , attention_mask=lowerCamelCase_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : torch.device ):
'''simple docstring'''
return torch.zeros(lowerCamelCase_ , self.prefix_length , dtype=torch.int64 , device=lowerCamelCase_ )
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
return self.encode_prefix(lowerCamelCase_ )
@torch.no_grad()
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = torch.split(lowerCamelCase_ , 1 , dim=0 )
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : Tuple = []
for feature in features:
SCREAMING_SNAKE_CASE : Optional[int] = self.decode_prefix(feature.to(lowerCamelCase_ ) ) # back to the clip feature
# Only support beam search for now
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.generate_beam(
input_embeds=lowerCamelCase_ , device=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = torch.stack(lowerCamelCase_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : int=None , lowerCamelCase_ : int = 5 , lowerCamelCase_ : int = 67 , lowerCamelCase_ : float = 1.0 , lowerCamelCase_ : Optional[int] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = eos_token_id
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.int )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.bool )
if input_embeds is not None:
SCREAMING_SNAKE_CASE : Dict = input_embeds
else:
SCREAMING_SNAKE_CASE : Dict = self.transformer.transformer.wte(lowerCamelCase_ )
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[int] = self.transformer(inputs_embeds=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = outputs.logits
SCREAMING_SNAKE_CASE : Optional[int] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
SCREAMING_SNAKE_CASE : Any = logits.softmax(-1 ).log()
if scores is None:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = logits.topk(lowerCamelCase_ , -1 )
SCREAMING_SNAKE_CASE : Optional[Any] = generated.expand(lowerCamelCase_ , *generated.shape[1:] )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
SCREAMING_SNAKE_CASE : List[Any] = next_tokens
else:
SCREAMING_SNAKE_CASE : Dict = tokens.expand(lowerCamelCase_ , *tokens.shape[1:] )
SCREAMING_SNAKE_CASE : str = torch.cat((tokens, next_tokens) , dim=1 )
else:
SCREAMING_SNAKE_CASE : Tuple = -float(np.inf )
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Dict = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
SCREAMING_SNAKE_CASE : List[str] = scores_sum / seq_lengths[:, None]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = scores_sum_average.view(-1 ).topk(lowerCamelCase_ , -1 )
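# Descriptive note (added): the topk above runs over the flattened
# (num_beams * vocab_size) score matrix, so the integer division below
# recovers which source beam each winner came from, and the modulo recovers
# the chosen token id within that beam.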
SCREAMING_SNAKE_CASE : str = next_tokens // scores_sum.shape[1]
SCREAMING_SNAKE_CASE : Tuple = seq_lengths[next_tokens_source]
SCREAMING_SNAKE_CASE : int = next_tokens % scores_sum.shape[1]
SCREAMING_SNAKE_CASE : Dict = next_tokens.unsqueeze(1 )
SCREAMING_SNAKE_CASE : Dict = tokens[next_tokens_source]
SCREAMING_SNAKE_CASE : Any = torch.cat((tokens, next_tokens) , dim=1 )
SCREAMING_SNAKE_CASE : List[str] = generated[next_tokens_source]
SCREAMING_SNAKE_CASE : Optional[Any] = scores_sum_average * seq_lengths
SCREAMING_SNAKE_CASE : Any = is_stopped[next_tokens_source]
SCREAMING_SNAKE_CASE : Dict = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
SCREAMING_SNAKE_CASE : str = torch.cat((generated, next_token_embed) , dim=1 )
SCREAMING_SNAKE_CASE : Dict = is_stopped + next_tokens.eq(lowerCamelCase_ ).squeeze()
if is_stopped.all():
break
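# Descriptive note (added): after the loop, beams are ranked by their
# length-normalized log-probability so that shorter sequences are not
# unfairly favored over longer ones.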
SCREAMING_SNAKE_CASE : int = scores / seq_lengths
SCREAMING_SNAKE_CASE : Dict = scores.argsort(descending=lowerCamelCase_ )
# tokens tensors are already padded to max_seq_length
SCREAMING_SNAKE_CASE : Union[str, Any] = [tokens[i] for i in order]
SCREAMING_SNAKE_CASE : Dict = torch.stack(lowerCamelCase_ , dim=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 323 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase__ :
"""simple docstring"""
@staticmethod
def lowerCamelCase_ ( *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
pass
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = hashlib.md5(image.tobytes() )
return m.hexdigest()
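# Descriptive note (added): hashing the raw image bytes gives a cheap,
# deterministic fingerprint for comparing rendered depth maps in tests; the
# exact-hash assertion further down is commented out because it proved flaky.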
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = DepthEstimationPipeline(model=lowerCamelCase_ , image_processor=lowerCamelCase_ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , lowerCamelCase_ )
import datasets
SCREAMING_SNAKE_CASE : List[str] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
SCREAMING_SNAKE_CASE : Any = depth_estimator(
[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
] )
self.assertEqual(
[
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
] , lowerCamelCase_ , )
@require_tf
@unittest.skip("""Depth estimation is not implemented in TF""" )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@slow
@require_torch
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = """Intel/dpt-large"""
SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline("""depth-estimation""" , model=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
SCREAMING_SNAKE_CASE : str = hashimage(outputs["""depth"""] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.662 )
@require_torch
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.skipTest("""There is not hf-internal-testing tiny model for either GLPN nor DPT""" )
| 323 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["""NllbTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["""NllbTokenizerFast"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
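# Illustrative note (added): _LazyModule defers the heavy submodule imports
# until an attribute is first accessed, so both access patterns below resolve
# to the same object while only paying the import cost on first use. A
# minimal sketch, assuming the package is importable as `transformers`:
#
#     from transformers import NllbTokenizer               # resolved lazily
#     from transformers.models.nllb import NllbTokenizer   # same object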
| 323 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=13 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Dict=2_24 , lowerCamelCase_ : List[Any]=30 , lowerCamelCase_ : Union[str, Any]=4_00 , lowerCamelCase_ : str=True , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 18}
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : Any = num_channels
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : Dict = min_resolution
SCREAMING_SNAKE_CASE : List[str] = max_resolution
SCREAMING_SNAKE_CASE : str = do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size
SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE : List[Any] = image_mean
SCREAMING_SNAKE_CASE : str = image_std
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ViTImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = EfficientFormerImageProcessorTester(self )
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : List[str] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
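# Descriptive note (added): the numpy and torch variants below assert the
# exact same (batch, channels, height, width) contract as the PIL test above,
# so a regression in any one input mode is caught independently.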
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Tuple = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
| 323 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ['''image_processor''', '''tokenizer''']
SCREAMING_SNAKE_CASE__ = '''Pix2StructImageProcessor'''
SCREAMING_SNAKE_CASE__ = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self : Optional[int] , lowerCamelCase_ : Dict , lowerCamelCase_ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = False
super().__init__(lowerCamelCase_ , lowerCamelCase_ )
def __call__( self : str , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCamelCase_ : bool = True , lowerCamelCase_ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase_ : Union[bool, str, TruncationStrategy] = None , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[int] = 20_48 , lowerCamelCase_ : int = 0 , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = True , lowerCamelCase_ : Optional[Union[str, TensorType]] = None , **lowerCamelCase_ : Any , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None and not self.image_processor.is_vqa:
SCREAMING_SNAKE_CASE : Dict = self.tokenizer
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(
text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , stride=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , return_special_tokens_mask=lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , return_length=lowerCamelCase_ , verbose=lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
SCREAMING_SNAKE_CASE : str = self.image_processor(
lowerCamelCase_ , return_tensors=lowerCamelCase_ , max_patches=lowerCamelCase_ , **lowerCamelCase_ )
else:
# add pixel_values and bbox
SCREAMING_SNAKE_CASE : int = self.image_processor(
lowerCamelCase_ , return_tensors=lowerCamelCase_ , max_patches=lowerCamelCase_ , header_text=lowerCamelCase_ , **lowerCamelCase_ )
if text is not None and not self.image_processor.is_vqa:
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(
text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , stride=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , return_special_tokens_mask=lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , return_length=lowerCamelCase_ , verbose=lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ , )
if "attention_mask" in text_encoding:
SCREAMING_SNAKE_CASE : List[str] = text_encoding.pop("""attention_mask""" )
if "input_ids" in text_encoding:
SCREAMING_SNAKE_CASE : Union[str, Any] = text_encoding.pop("""input_ids""" )
else:
SCREAMING_SNAKE_CASE : Tuple = None
if text_encoding is not None:
encoding_image_processor.update(lowerCamelCase_ )
return encoding_image_processor
def lowerCamelCase_ ( self : List[Any] , *lowerCamelCase_ : Dict , **lowerCamelCase_ : List[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_ , **lowerCamelCase_ )
def lowerCamelCase_ ( self : int , *lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_ , **lowerCamelCase_ )
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
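# Minimal usage sketch (added), assuming the class above corresponds to a
# Pix2Struct-style processor pairing a T5 tokenizer with a Pix2Struct image
# processor; the names below are illustrative, not taken from this file:
#
#     processor = SomeProcessor(image_processor=image_processor, tokenizer=tokenizer)
#     inputs = processor(images=image, text="Describe the chart", return_tensors="pt")
#     # -> flattened patches plus (for non-VQA) decoder input ids / attention mask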
| 323 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__UpperCAmelCase = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 323 | 1 |
'''simple docstring'''
from collections.abc import Sequence
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return sum(c * (x**i) for i, c in enumerate(lowerCamelCase_ ) )
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = 0.0
for coeff in reversed(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = result * x + coeff
return result
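# Descriptive note (added): Horner's rule rewrites
# c0 + c1*x + c2*x**2 + ... + cn*x**n as (...(cn*x + c(n-1))*x + ...)*x + c0,
# evaluating the polynomial with n multiplications and n additions instead of
# recomputing each power of x the way evaluate_poly above does.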
if __name__ == "__main__":
__UpperCAmelCase = (0.0, 0.0, 5.0, 9.3, 7.0)
__UpperCAmelCase = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 323 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 323 | 1 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
__UpperCAmelCase = sys.version_info >= (3, 10)
def __A ( lowerCamelCase_=None , lowerCamelCase_=None ):
"""simple docstring"""
return field(default_factory=lambda: default , metadata=lowerCamelCase_ )
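# Descriptive note (added): wrapping the default in a factory lambda is what
# makes mutable defaults (lists) safe on dataclass fields; a bare `= []`
# default would be shared across instances and is rejected by dataclasses.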
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 42
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = field(default='''toto''' , metadata={'''help''': '''help message'''} )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = None
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''titi'''
SCREAMING_SNAKE_CASE__ = '''toto'''
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''titi'''
SCREAMING_SNAKE_CASE__ = '''toto'''
SCREAMING_SNAKE_CASE__ = 42
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = "toto"
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = BasicEnum(self.foo )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = "toto"
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = MixedTypeEnum(self.foo )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = field(default=lowercase_ , metadata={'''help''': '''help message'''} )
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = list_field(default=[] )
SCREAMING_SNAKE_CASE__ = list_field(default=[] )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = list_field(default=[] )
SCREAMING_SNAKE_CASE__ = list_field(default=[1, 2, 3] )
SCREAMING_SNAKE_CASE__ = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
SCREAMING_SNAKE_CASE__ = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field()
SCREAMING_SNAKE_CASE__ = field()
SCREAMING_SNAKE_CASE__ = field()
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = BasicEnum(self.required_enum )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = field()
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = field(default='''toto''' , metadata={'''help''': '''help message'''} )
SCREAMING_SNAKE_CASE__ = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
if is_python_no_less_than_3_10:
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = None
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = field(default=lowercase_ , metadata={'''help''': '''help message'''} )
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = list_field(default=[] )
SCREAMING_SNAKE_CASE__ = list_field(default=[] )
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : argparse.ArgumentParser , lowerCamelCase_ : argparse.ArgumentParser ):
'''simple docstring'''
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
SCREAMING_SNAKE_CASE : Optional[int] = {k: v for k, v in vars(lowerCamelCase_ ).items() if k != """container"""}
SCREAMING_SNAKE_CASE : Dict = {k: v for k, v in vars(lowerCamelCase_ ).items() if k != """container"""}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("""choices""" , lowerCamelCase_ ) and yy.get("""choices""" , lowerCamelCase_ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["""type"""](lowerCamelCase_ ) , yy["""type"""](lowerCamelCase_ ) )
del xx["type"], yy["type"]
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = HfArgumentParser(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=lowerCamelCase_ , required=lowerCamelCase_ )
expected.add_argument("""--bar""" , type=lowerCamelCase_ , required=lowerCamelCase_ )
expected.add_argument("""--baz""" , type=lowerCamelCase_ , required=lowerCamelCase_ )
expected.add_argument("""--flag""" , type=lowerCamelCase_ , default=lowerCamelCase_ , const=lowerCamelCase_ , nargs="""?""" )
self.argparsersEqual(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
((SCREAMING_SNAKE_CASE), ) : Union[str, Any] = parser.parse_args_into_dataclasses(lowerCamelCase_ , look_for_args_file=lowerCamelCase_ )
self.assertFalse(example.flag )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = HfArgumentParser(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=42 , type=lowerCamelCase_ )
expected.add_argument("""--baz""" , default="""toto""" , type=lowerCamelCase_ , help="""help message""" )
self.argparsersEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=lowerCamelCase_ , default=lowerCamelCase_ , const=lowerCamelCase_ , nargs="""?""" )
expected.add_argument("""--baz""" , type=lowerCamelCase_ , default=lowerCamelCase_ , const=lowerCamelCase_ , nargs="""?""" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=lowerCamelCase_ , dest="""baz""" )
expected.add_argument("""--opt""" , type=lowerCamelCase_ , default=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowerCamelCase_ )
for dataclass_type in dataclass_types:
SCREAMING_SNAKE_CASE : Any = HfArgumentParser(lowerCamelCase_ )
self.argparsersEqual(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = parser.parse_args([] )
self.assertEqual(lowerCamelCase_ , Namespace(foo=lowerCamelCase_ , baz=lowerCamelCase_ , opt=lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : int = parser.parse_args(["""--foo""", """--no_baz"""] )
self.assertEqual(lowerCamelCase_ , Namespace(foo=lowerCamelCase_ , baz=lowerCamelCase_ , opt=lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Any = parser.parse_args(["""--foo""", """--baz"""] )
self.assertEqual(lowerCamelCase_ , Namespace(foo=lowerCamelCase_ , baz=lowerCamelCase_ , opt=lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
self.assertEqual(lowerCamelCase_ , Namespace(foo=lowerCamelCase_ , baz=lowerCamelCase_ , opt=lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
self.assertEqual(lowerCamelCase_ , Namespace(foo=lowerCamelCase_ , baz=lowerCamelCase_ , opt=lowerCamelCase_ ) )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = HfArgumentParser(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
SCREAMING_SNAKE_CASE : Any = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = "toto"
SCREAMING_SNAKE_CASE : Optional[int] = HfArgumentParser(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = HfArgumentParser(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=lowerCamelCase_ )
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=lowerCamelCase_ )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=lowerCamelCase_ )
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=lowerCamelCase_ )
self.argparsersEqual(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = parser.parse_args([] )
self.assertEqual(
lowerCamelCase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(lowerCamelCase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=lowerCamelCase_ , type=lowerCamelCase_ )
expected.add_argument("""--bar""" , default=lowerCamelCase_ , type=lowerCamelCase_ , help="""help message""" )
expected.add_argument("""--baz""" , default=lowerCamelCase_ , type=lowerCamelCase_ )
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=lowerCamelCase_ )
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowerCamelCase_ )
for dataclass_type in dataclass_types:
SCREAMING_SNAKE_CASE : Union[str, Any] = HfArgumentParser(lowerCamelCase_ )
self.argparsersEqual(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args([] )
self.assertEqual(lowerCamelCase_ , Namespace(foo=lowerCamelCase_ , bar=lowerCamelCase_ , baz=lowerCamelCase_ , ces=[] , des=[] ) )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(lowerCamelCase_ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = HfArgumentParser(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=lowerCamelCase_ , required=lowerCamelCase_ )
expected.add_argument("""--required_str""" , type=lowerCamelCase_ , required=lowerCamelCase_ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=lowerCamelCase_ , )
self.argparsersEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = HfArgumentParser(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=lowerCamelCase_ , required=lowerCamelCase_ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=lowerCamelCase_ , )
expected.add_argument("""--opt""" , type=lowerCamelCase_ , default=lowerCamelCase_ )
expected.add_argument("""--baz""" , default="""toto""" , type=lowerCamelCase_ , help="""help message""" )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=lowerCamelCase_ )
self.argparsersEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = HfArgumentParser(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
SCREAMING_SNAKE_CASE : Dict = parser.parse_dict(lowerCamelCase_ )[0]
SCREAMING_SNAKE_CASE : Optional[int] = BasicExample(**lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = HfArgumentParser(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
"""extra""": 42,
}
self.assertRaises(lowerCamelCase_ , parser.parse_dict , lowerCamelCase_ , allow_extra_keys=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = HfArgumentParser(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE : str = os.path.join(lowerCamelCase_ , """temp_json""" )
os.mkdir(lowerCamelCase_ )
with open(temp_local_path + """.json""" , """w+""" ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_json_file(Path(temp_local_path + """.json""" ) )[0]
SCREAMING_SNAKE_CASE : str = BasicExample(**lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = HfArgumentParser(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE : Any = os.path.join(lowerCamelCase_ , """temp_yaml""" )
os.mkdir(lowerCamelCase_ )
with open(temp_local_path + """.yaml""" , """w+""" ) as f:
yaml.dump(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
SCREAMING_SNAKE_CASE : Tuple = BasicExample(**lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = HfArgumentParser(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
| 323 |
'''simple docstring'''
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return number | (1 << position)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return number & ~(1 << position)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return number ^ (1 << position)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return ((number >> position) & 1) == 1
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return int((number & (1 << position)) != 0 )
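# Worked examples (added) for the five helpers above, using the conventional
# names set/clear/flip/is-set/get bit (the identifiers in this file are
# obfuscated), with 0b1001 (decimal 9) as input:
#
#     set_bit(0b1001, 1)    -> 0b1011 (11): OR with 1 << 1 forces bit 1 on
#     clear_bit(0b1011, 1)  -> 0b1001 (9):  AND with ~(1 << 1) forces bit 1 off
#     flip_bit(0b1001, 3)   -> 0b0001 (1):  XOR with 1 << 3 toggles bit 3
#     is_bit_set(0b1001, 0) -> True:        shift right, then mask the low bit
#     get_bit(0b1001, 2)    -> 0:           mask in place, collapse to 0 or 1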
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323 | 1 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __A ( lowerCamelCase_ ): # picklable for multiprocessing
"""simple docstring"""
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __A ( ):
"""simple docstring"""
with parallel_backend("""spark""" ):
assert ParallelBackendConfig.backend_name == "spark"
SCREAMING_SNAKE_CASE : Any = [1, 2, 3]
with pytest.raises(lowerCamelCase_ ):
with parallel_backend("""unsupported backend""" ):
map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=2 )
with pytest.raises(lowerCamelCase_ ):
with parallel_backend("""unsupported backend""" ):
map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""" , [2, -1] )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = [1, 2]
SCREAMING_SNAKE_CASE : Dict = {"""a""": 1, """b""": 2}
SCREAMING_SNAKE_CASE : Tuple = {"""a""": [1, 2], """b""": [3, 4]}
SCREAMING_SNAKE_CASE : Union[str, Any] = {"""a""": {"""1""": 1}, """b""": 2}
SCREAMING_SNAKE_CASE : Tuple = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
SCREAMING_SNAKE_CASE : Any = [2, 3]
SCREAMING_SNAKE_CASE : Any = {"""a""": 2, """b""": 3}
SCREAMING_SNAKE_CASE : Optional[Any] = {"""a""": [2, 3], """b""": [4, 5]}
SCREAMING_SNAKE_CASE : Any = {"""a""": {"""1""": 2}, """b""": 3}
SCREAMING_SNAKE_CASE : Optional[Any] = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
with parallel_backend("""spark""" ):
assert map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) == expected_map_nested_sa
assert map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) == expected_map_nested_sa
assert map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) == expected_map_nested_sa
assert map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) == expected_map_nested_sa
assert map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) == expected_map_nested_sa
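# Minimal usage sketch (added) of the pattern under test, assuming a Spark
# joblib backend is installed and `add_one` is a module-level (picklable)
# function like the one at the top of this file:
#
#     with parallel_backend("spark"):
#         result = map_nested(add_one, {"a": [1, 2], "b": [3, 4]}, num_proc=2)
#     # -> {"a": [2, 3], "b": [4, 5]}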
| 323 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=14 , lowerCamelCase_ : Optional[Any]=7 , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=True , lowerCamelCase_ : str=False , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : int=99 , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : int=4 , lowerCamelCase_ : List[Any]=4 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : Union[str, Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : List[str]=5_12 , lowerCamelCase_ : Union[str, Any]=0.02 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : Optional[int] = batch_size
SCREAMING_SNAKE_CASE : Any = seq_length
SCREAMING_SNAKE_CASE : List[str] = is_training
SCREAMING_SNAKE_CASE : Optional[int] = use_input_mask
SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = rotary_dim
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Dict = vocab_size - 1
SCREAMING_SNAKE_CASE : str = vocab_size - 1
SCREAMING_SNAKE_CASE : List[Any] = vocab_size - 1
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : List[str] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = config_and_inputs
SCREAMING_SNAKE_CASE : Tuple = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = 20
SCREAMING_SNAKE_CASE : Any = model_class_name(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model.init_cache(input_ids.shape[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
SCREAMING_SNAKE_CASE : Optional[int] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE : Any = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE : str = model(
input_ids[:, -1:] , attention_mask=lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
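# Descriptive note (added): the check above exercises incremental decoding —
# the first n-1 tokens are run once to fill the key/value cache, the final
# token is then fed alone with `past_key_values`, and the stitched logits
# must match a single full-sequence forward pass to within 1e-3.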
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = 20
SCREAMING_SNAKE_CASE : Dict = model_class_name(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
SCREAMING_SNAKE_CASE : str = model.init_cache(input_ids.shape[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE : Any = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE : Dict = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class UpperCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
SCREAMING_SNAKE_CASE__ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxGPTJModelTester(self )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
@tooslow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : Optional[Any] = model.config.eos_token_id
SCREAMING_SNAKE_CASE : str = jax.jit(model.generate )
SCREAMING_SNAKE_CASE : str = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@is_pt_flax_cross_test
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Optional[int] = 1
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
SCREAMING_SNAKE_CASE : Optional[int] = pt_model_class(lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ , dtype=jnp.float32 )
SCREAMING_SNAKE_CASE : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = fx_state
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = pt_model(**lowerCamelCase_ ).to_tuple()
SCREAMING_SNAKE_CASE : Any = fx_model(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model_class.from_pretrained(lowerCamelCase_ , from_pt=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = fx_model_loaded(**lowerCamelCase_ ).to_tuple()
self.assertEqual(
len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = pt_model_class(lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ , dtype=jnp.float32 )
SCREAMING_SNAKE_CASE : List[Any] = load_flax_weights_in_pytorch_model(lowerCamelCase_ , fx_model.params )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
SCREAMING_SNAKE_CASE : Dict = 1
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : Tuple = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = pt_model(**lowerCamelCase_ ).to_tuple()
SCREAMING_SNAKE_CASE : Optional[Any] = fx_model(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = pt_model_class.from_pretrained(lowerCamelCase_ , from_flax=lowerCamelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = pt_model_loaded(**lowerCamelCase_ ).to_tuple()
self.assertEqual(
len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
SCREAMING_SNAKE_CASE : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase_ )
| 323 | 1 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__UpperCAmelCase = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
__UpperCAmelCase = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
__UpperCAmelCase = r"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
"""simple docstring"""
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/hendrycks/math""" , codebase_urls=["""https://github.com/hendrycks/math"""] , )
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
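# Descriptive note (added): math_equivalence.is_equiv canonicalizes both
# strings (e.g. "1/2" and "\\frac{1}{2}" compare equal) before checking
# equality, so the accuracy below is robust to surface-level LaTeX differences.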
SCREAMING_SNAKE_CASE : int = 0.0
for i, j in zip(lowerCamelCase_ , lowerCamelCase_ ):
n_correct += 1.0 if math_equivalence.is_equiv(lowerCamelCase_ , lowerCamelCase_ ) else 0.0
SCREAMING_SNAKE_CASE : str = n_correct / len(lowerCamelCase_ )
return {
"accuracy": accuracy,
}
| 323 |
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__ ( lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [R'''h\.\d+\.attn\.bias''', R'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : int = 5_02_57 , lowerCamelCase_ : int = 10_24 , lowerCamelCase_ : int = 7_68 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : str = "gelu_new" , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 1e-5 , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[int] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'''
f''' `n_embd`: {n_embd} are not equal.''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = prefix_inner_dim
SCREAMING_SNAKE_CASE : List[str] = prefix_hidden_dim
SCREAMING_SNAKE_CASE : Tuple = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
SCREAMING_SNAKE_CASE : str = (
nn.Linear(self.prefix_hidden_dim , lowerCamelCase_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
SCREAMING_SNAKE_CASE : Any = GPTaConfig(
vocab_size=lowerCamelCase_ , n_positions=lowerCamelCase_ , n_embd=lowerCamelCase_ , n_layer=lowerCamelCase_ , n_head=lowerCamelCase_ , n_inner=lowerCamelCase_ , activation_function=lowerCamelCase_ , resid_pdrop=lowerCamelCase_ , embd_pdrop=lowerCamelCase_ , attn_pdrop=lowerCamelCase_ , layer_norm_epsilon=lowerCamelCase_ , initializer_range=lowerCamelCase_ , scale_attn_weights=lowerCamelCase_ , use_cache=lowerCamelCase_ , scale_attn_by_inverse_layer_idx=lowerCamelCase_ , reorder_and_upcast_attn=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaLMHeadModel(lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : Optional[torch.Tensor] = None , lowerCamelCase_ : Optional[torch.Tensor] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.transformer.transformer.wte(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.encode_prefix(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.decode_prefix(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
SCREAMING_SNAKE_CASE : Dict = torch.cat((dummy_token, input_ids) , dim=1 )
SCREAMING_SNAKE_CASE : str = self.transformer(inputs_embeds=lowerCamelCase_ , labels=lowerCamelCase_ , attention_mask=lowerCamelCase_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : torch.device ):
'''simple docstring'''
return torch.zeros(lowerCamelCase_ , self.prefix_length , dtype=torch.intaa , device=lowerCamelCase_ )
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
return self.encode_prefix(lowerCamelCase_ )
@torch.no_grad()
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = torch.split(lowerCamelCase_ , 1 , dim=0 )
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : Tuple = []
for feature in features:
SCREAMING_SNAKE_CASE : Optional[int] = self.decode_prefix(feature.to(lowerCamelCase_ ) ) # back to the clip feature
# Only support beam search for now
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.generate_beam(
input_embeds=lowerCamelCase_ , device=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = torch.stack(lowerCamelCase_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : int=None , lowerCamelCase_ : int = 5 , lowerCamelCase_ : int = 67 , lowerCamelCase_ : float = 1.0 , lowerCamelCase_ : Optional[int] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = eos_token_id
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.int )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.bool )
if input_embeds is not None:
SCREAMING_SNAKE_CASE : Dict = input_embeds
else:
SCREAMING_SNAKE_CASE : Dict = self.transformer.transformer.wte(lowerCamelCase_ )
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[int] = self.transformer(inputs_embeds=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = outputs.logits
SCREAMING_SNAKE_CASE : Optional[int] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
SCREAMING_SNAKE_CASE : Any = logits.softmax(-1 ).log()
if scores is None:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = logits.topk(lowerCamelCase_ , -1 )
SCREAMING_SNAKE_CASE : Optional[Any] = generated.expand(lowerCamelCase_ , *generated.shape[1:] )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
SCREAMING_SNAKE_CASE : List[Any] = next_tokens
else:
SCREAMING_SNAKE_CASE : Dict = tokens.expand(lowerCamelCase_ , *tokens.shape[1:] )
SCREAMING_SNAKE_CASE : str = torch.cat((tokens, next_tokens) , dim=1 )
else:
SCREAMING_SNAKE_CASE : Tuple = -float(np.inf )
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Dict = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
SCREAMING_SNAKE_CASE : List[str] = scores_sum / seq_lengths[:, None]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = scores_sum_average.view(-1 ).topk(lowerCamelCase_ , -1 )
SCREAMING_SNAKE_CASE : str = next_tokens // scores_sum.shape[1]
SCREAMING_SNAKE_CASE : Tuple = seq_lengths[next_tokens_source]
SCREAMING_SNAKE_CASE : int = next_tokens % scores_sum.shape[1]
SCREAMING_SNAKE_CASE : Dict = next_tokens.unsqueeze(1 )
SCREAMING_SNAKE_CASE : Dict = tokens[next_tokens_source]
SCREAMING_SNAKE_CASE : Any = torch.cat((tokens, next_tokens) , dim=1 )
SCREAMING_SNAKE_CASE : List[str] = generated[next_tokens_source]
SCREAMING_SNAKE_CASE : Optional[Any] = scores_sum_average * seq_lengths
SCREAMING_SNAKE_CASE : Any = is_stopped[next_tokens_source]
SCREAMING_SNAKE_CASE : Dict = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
SCREAMING_SNAKE_CASE : str = torch.cat((generated, next_token_embed) , dim=1 )
SCREAMING_SNAKE_CASE : Dict = is_stopped + next_tokens.eq(lowerCamelCase_ ).squeeze()
if is_stopped.all():
break
SCREAMING_SNAKE_CASE : int = scores / seq_lengths
SCREAMING_SNAKE_CASE : Dict = scores.argsort(descending=lowerCamelCase_ )
# tokens tensors are already padded to max_seq_length
SCREAMING_SNAKE_CASE : Union[str, Any] = [tokens[i] for i in order]
SCREAMING_SNAKE_CASE : Dict = torch.stack(lowerCamelCase_ , dim=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
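# --- Added usage sketch (not part of the original row) ---
# The class above mirrors diffusers' UniDiffuserTextDecoder: a GPT-2 head that
# beam-searches captions from CLIP-style prefix embeddings. The de-obfuscated
# class name, the module path, and the dummy feature tensor below are all
# assumptions; treat this as an illustrative sketch, not the documented API.
import torch
from transformers import GPT2Tokenizer
from diffusers.pipelines.unidiffuser.modeling_text_decoder import UniDiffuserTextDecoder

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
# prefix_inner_dim matches the default n_embd, so the prefix projections stay Identity
decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768)
features = torch.randn(1, 77, 768)  # stand-in for a batch of CLIP text embeddings
tokens, lengths = decoder.generate_captions(features, tokenizer.eos_token_id, device="cpu")
print(tokenizer.decode(tokens[0][: lengths[0]]))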
| 323 | 1 |
'''simple docstring'''
# fmt: off
__UpperCAmelCase = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
__UpperCAmelCase = {value: key for key, value in MORSE_CODE_DICT.items()}
def __A ( lowerCamelCase_ ):
"""simple docstring"""
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
return "".join(REVERSE_DICT[char] for char in message.split() )
def __A ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = """Morse code here!"""
print(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = encrypt(lowerCamelCase_ )
print(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = decrypt(lowerCamelCase_ )
print(lowerCamelCase_ )
if __name__ == "__main__":
main()
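# Worked example for the demo above (added; traced against MORSE_CODE_DICT):
# encrypt("Morse code here!") == "-- --- .-. ... . / -.-. --- -.. . / .... . .-. . -.-.--"
# decrypt() of that string gives "MORSE CODE HERE!" -- case is lost because
# encryption upper-cases the message before the table lookup, and the space
# round-trips through the "/" word separator.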
| 323 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''git_vision_model'''
def __init__( self : int , lowerCamelCase_ : Dict=7_68 , lowerCamelCase_ : Tuple=30_72 , lowerCamelCase_ : List[Any]=12 , lowerCamelCase_ : Tuple=12 , lowerCamelCase_ : int=3 , lowerCamelCase_ : List[str]=2_24 , lowerCamelCase_ : Optional[Any]=16 , lowerCamelCase_ : Optional[Any]="quick_gelu" , lowerCamelCase_ : List[Any]=1e-5 , lowerCamelCase_ : List[Any]=0.0 , lowerCamelCase_ : Optional[Any]=0.02 , **lowerCamelCase_ : str , ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : int = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE : Optional[int] = image_size
SCREAMING_SNAKE_CASE : List[str] = initializer_range
SCREAMING_SNAKE_CASE : str = attention_dropout
SCREAMING_SNAKE_CASE : Any = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = hidden_act
@classmethod
def lowerCamelCase_ ( cls : List[str] , lowerCamelCase_ : Union[str, os.PathLike] , **lowerCamelCase_ : int ):
'''simple docstring'''
cls._set_token_in_kwargs(lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = cls.get_config_dict(lowerCamelCase_ , **lowerCamelCase_ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
SCREAMING_SNAKE_CASE : Optional[Any] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowerCamelCase_ , **lowerCamelCase_ )
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''git'''
def __init__( self : List[str] , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Tuple=3_05_22 , lowerCamelCase_ : Optional[Any]=7_68 , lowerCamelCase_ : Any=6 , lowerCamelCase_ : List[str]=12 , lowerCamelCase_ : List[str]=30_72 , lowerCamelCase_ : Union[str, Any]="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Dict=0.1 , lowerCamelCase_ : Optional[int]=10_24 , lowerCamelCase_ : int=0.02 , lowerCamelCase_ : Optional[int]=1e-12 , lowerCamelCase_ : Union[str, Any]=0 , lowerCamelCase_ : Optional[Any]="absolute" , lowerCamelCase_ : Dict=True , lowerCamelCase_ : Optional[Any]=False , lowerCamelCase_ : Optional[int]=1_01 , lowerCamelCase_ : Optional[Any]=1_02 , lowerCamelCase_ : List[str]=None , **lowerCamelCase_ : str , ):
'''simple docstring'''
super().__init__(bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , pad_token_id=lowerCamelCase_ , **lowerCamelCase_ )
if vision_config is None:
SCREAMING_SNAKE_CASE : Any = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = GitVisionConfig(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Tuple = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : Union[str, Any] = position_embedding_type
SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache
SCREAMING_SNAKE_CASE : List[str] = tie_word_embeddings
SCREAMING_SNAKE_CASE : int = num_image_with_embedding
SCREAMING_SNAKE_CASE : Optional[Any] = bos_token_id
SCREAMING_SNAKE_CASE : str = eos_token_id
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : Tuple = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE : Any = self.__class__.model_type
return output
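# --- Added usage sketch ---
# Assuming the de-obfuscated transformers names GitConfig / GitVisionConfig, a
# joint text+vision configuration is assembled like this; note the vision config
# must be passed as a plain dict, since __init__ re-instantiates it with **kwargs.
from transformers import GitConfig, GitVisionConfig

vision_config = GitVisionConfig(image_size=384)  # override a single vision default
config = GitConfig(vision_config=vision_config.to_dict(), num_hidden_layers=6)
print(config.vision_config.image_size)  # 384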
| 323 | 1 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any]=13 , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : Optional[int]=24 , lowerCamelCase_ : Any=16 , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : Optional[Any]=32 , lowerCamelCase_ : Optional[Any]=5 , lowerCamelCase_ : Any=4 , lowerCamelCase_ : Optional[Any]=37 , lowerCamelCase_ : str="gelu" , lowerCamelCase_ : str=0.1 , lowerCamelCase_ : Optional[int]=0.1 , lowerCamelCase_ : List[str]=10 , lowerCamelCase_ : Optional[int]=0.02 , lowerCamelCase_ : Tuple=None , lowerCamelCase_ : Dict=2 , lowerCamelCase_ : Tuple=2 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = parent
SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE : Any = patch_size
SCREAMING_SNAKE_CASE : List[str] = max_length
SCREAMING_SNAKE_CASE : int = num_mel_bins
SCREAMING_SNAKE_CASE : int = is_training
SCREAMING_SNAKE_CASE : Optional[int] = use_labels
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = type_sequence_label_size
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : List[Any] = scope
SCREAMING_SNAKE_CASE : str = frequency_stride
SCREAMING_SNAKE_CASE : Optional[int] = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
SCREAMING_SNAKE_CASE : Optional[int] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
SCREAMING_SNAKE_CASE : Optional[int] = (self.max_length - self.patch_size) // self.time_stride + 1
SCREAMING_SNAKE_CASE : Optional[int] = frequency_out_dimension * time_out_dimension
SCREAMING_SNAKE_CASE : str = num_patches + 2
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
SCREAMING_SNAKE_CASE : Any = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, input_values, labels
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = ASTModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : str = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : Optional[Any] = {"""input_values""": input_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ = (
{'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : str , lowerCamelCase_ : Dict ):
'''simple docstring'''
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = ASTModelTester(self )
SCREAMING_SNAKE_CASE : Tuple = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""AST does not use inputs_embeds""" )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : int = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : int = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Optional[int] = ["""input_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[int] = ASTModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __A ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = torchaudio.load(lowerCamelCase_ )
return audio, sampling_rate
@require_torch
@require_torchaudio
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return (
ASTFeatureExtractor.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" )
if is_torchaudio_available()
else None
)
@slow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.default_feature_extractor
SCREAMING_SNAKE_CASE : Optional[Any] = ASTForAudioClassification.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.default_feature_extractor
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_audio()
SCREAMING_SNAKE_CASE : str = audio.squeeze().numpy()
SCREAMING_SNAKE_CASE : List[str] = feature_extractor(lowerCamelCase_ , sampling_rate=lowerCamelCase_ , return_tensors="""pt""" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**lowerCamelCase_ )
# verify the logits
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 5_27) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([-0.8_760, -7.0_042, -8.6_602] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 ) )
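# --- Added invocation sketch ---
# In a transformers checkout these tests are driven with pytest; the file path and
# the RUN_SLOW gate below follow the usual transformers test layout (assumed here):
#   RUN_SLOW=1 pytest tests/models/audio_spectrogram_transformer -k "IntegrationTest"
# Without RUN_SLOW=1 the @slow-decorated integration test is skipped; it also needs
# network access to download the checkpoint and the sample audio file.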
| 323 |
'''simple docstring'''
from manim import *
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE : List[str] = Rectangle(height=0.25 , width=0.25 )
SCREAMING_SNAKE_CASE : Optional[int] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : List[Any] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : str = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Tuple = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : List[Any] = Text("""CPU""" , font_size=24 )
SCREAMING_SNAKE_CASE : Any = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = Text("""GPU""" , font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : List[Any] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Text("""Model""" , font_size=24 )
SCREAMING_SNAKE_CASE : List[str] = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for i, rect in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = fill.copy().set_fill(lowerCamelCase_ , opacity=0.8 )
target.move_to(lowerCamelCase_ )
model_arr.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(lowerCamelCase_ )
self.add(*lowerCamelCase_ , *lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Tuple = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Tuple = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Optional[int] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Dict = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : List[Any] = Text("""Disk""" , font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
disk.move_to([-4, -1.25, 0] )
self.add(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE : Optional[Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = MarkupText(
f'''<span fgcolor='{BLUE}'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(lowerCamelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = MarkupText(
f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = Square(0.3 )
input.set_fill(lowerCamelCase_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , lowerCamelCase_ , buff=0.5 )
self.play(Write(lowerCamelCase_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=lowerCamelCase_ , buff=0.02 )
self.play(MoveToTarget(lowerCamelCase_ ) )
self.play(FadeOut(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : int = Arrow(start=lowerCamelCase_ , end=lowerCamelCase_ , color=lowerCamelCase_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , lowerCamelCase_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
SCREAMING_SNAKE_CASE : Optional[int] = MarkupText(
f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=3 ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02}
self.play(
Write(lowerCamelCase_ ) , Circumscribe(model_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_cpu_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
SCREAMING_SNAKE_CASE : Optional[int] = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , lowerCamelCase_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
SCREAMING_SNAKE_CASE : Any = AnimationGroup(
FadeOut(lowerCamelCase_ , run_time=0.5 ) , MoveToTarget(lowerCamelCase_ , run_time=0.5 ) , FadeIn(lowerCamelCase_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(lowerCamelCase_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
SCREAMING_SNAKE_CASE : Optional[Any] = 0.7
self.play(
Circumscribe(model_arr[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_arr[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = a_c
SCREAMING_SNAKE_CASE : Optional[Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(lowerCamelCase_ ) , FadeOut(lowerCamelCase_ , run_time=0.5 ) , )
SCREAMING_SNAKE_CASE : int = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=3 ) , MoveToTarget(lowerCamelCase_ ) )
self.wait()
| 323 | 1 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = image.size
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
SCREAMING_SNAKE_CASE : Dict = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] )
SCREAMING_SNAKE_CASE : int = np.array(lowerCamelCase_ ).astype(np.floataa ) / 255.0
SCREAMING_SNAKE_CASE : Union[str, Any] = image[None].transpose(0 , 3 , 1 , 2 )
SCREAMING_SNAKE_CASE : Optional[int] = torch.from_numpy(lowerCamelCase_ )
return 2.0 * image - 1.0
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Tuple , lowerCamelCase_ : VQModel , lowerCamelCase_ : UNetaDModel , lowerCamelCase_ : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=lowerCamelCase_ , unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__( self : Optional[Any] , lowerCamelCase_ : Union[torch.Tensor, PIL.Image.Image] = None , lowerCamelCase_ : Optional[int] = 1 , lowerCamelCase_ : Optional[int] = 1_00 , lowerCamelCase_ : Optional[float] = 0.0 , lowerCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase_ : Optional[str] = "pil" , lowerCamelCase_ : bool = True , ):
'''simple docstring'''
if isinstance(lowerCamelCase_ , PIL.Image.Image ):
SCREAMING_SNAKE_CASE : List[str] = 1
elif isinstance(lowerCamelCase_ , torch.Tensor ):
SCREAMING_SNAKE_CASE : Optional[int] = image.shape[0]
else:
raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowerCamelCase_ )}''' )
if isinstance(lowerCamelCase_ , PIL.Image.Image ):
SCREAMING_SNAKE_CASE : str = preprocess(lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
SCREAMING_SNAKE_CASE : str = (batch_size, self.unet.config.in_channels // 2, height, width)
SCREAMING_SNAKE_CASE : Tuple = next(self.unet.parameters() ).dtype
SCREAMING_SNAKE_CASE : List[Any] = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ , device=self.device , dtype=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = image.to(device=self.device , dtype=lowerCamelCase_ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(lowerCamelCase_ , device=self.device )
SCREAMING_SNAKE_CASE : Dict = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
SCREAMING_SNAKE_CASE : List[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
SCREAMING_SNAKE_CASE : Any = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
SCREAMING_SNAKE_CASE : List[str] = {}
if accepts_eta:
SCREAMING_SNAKE_CASE : Any = eta
for t in self.progress_bar(lowerCamelCase_ ):
# concat latents and low resolution image in the channel dimension.
SCREAMING_SNAKE_CASE : Tuple = torch.cat([latents, image] , dim=1 )
SCREAMING_SNAKE_CASE : int = self.scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
# predict the noise residual
SCREAMING_SNAKE_CASE : Tuple = self.unet(lowerCamelCase_ , lowerCamelCase_ ).sample
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
# decode the image latents with the VQVAE
SCREAMING_SNAKE_CASE : Tuple = self.vqvae.decode(lowerCamelCase_ ).sample
SCREAMING_SNAKE_CASE : Optional[Any] = torch.clamp(lowerCamelCase_ , -1.0 , 1.0 )
SCREAMING_SNAKE_CASE : List[str] = image / 2 + 0.5
SCREAMING_SNAKE_CASE : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE : Optional[Any] = self.numpy_to_pil(lowerCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase_ )
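# --- Added usage sketch ---
# With the de-obfuscated diffusers name LDMSuperResolutionPipeline and the public
# CompVis checkpoint, the pipeline above upscales a small RGB image like this
# (file names are placeholders):
import PIL.Image
from diffusers import LDMSuperResolutionPipeline

pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
low_res = PIL.Image.open("low_res.png").convert("RGB")
upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")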
| 323 |
'''simple docstring'''
from __future__ import annotations
__UpperCAmelCase = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase_ : dict[str, list[str]] , lowerCamelCase_ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = graph
# mapping node to its parent in resulting breadth first tree
SCREAMING_SNAKE_CASE : dict[str, str | None] = {}
SCREAMING_SNAKE_CASE : List[str] = source_vertex
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = {self.source_vertex}
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Optional[Any] = [self.source_vertex] # first in first out queue
while queue:
SCREAMING_SNAKE_CASE : str = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = vertex
queue.append(lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str ):
'''simple docstring'''
if target_vertex == self.source_vertex:
return self.source_vertex
SCREAMING_SNAKE_CASE : Optional[Any] = self.parent.get(lowerCamelCase_ )
if target_vertex_parent is None:
SCREAMING_SNAKE_CASE : Tuple = (
f'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
)
raise ValueError(lowerCamelCase_ )
return self.shortest_path(lowerCamelCase_ ) + f'''->{target_vertex}'''
if __name__ == "__main__":
__UpperCAmelCase = Graph(graph, """G""")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 323 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''facebook/bart-large-mnli'''
SCREAMING_SNAKE_CASE__ = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
SCREAMING_SNAKE_CASE__ = '''text_classifier'''
SCREAMING_SNAKE_CASE__ = AutoTokenizer
SCREAMING_SNAKE_CASE__ = AutoModelForSequenceClassification
SCREAMING_SNAKE_CASE__ = ['''text''', ['''text''']]
SCREAMING_SNAKE_CASE__ = ['''text''']
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
super().setup()
SCREAMING_SNAKE_CASE : List[str] = self.model.config
SCREAMING_SNAKE_CASE : Any = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("""entail""" ):
SCREAMING_SNAKE_CASE : List[str] = int(lowerCamelCase_ )
if self.entailment_id == -1:
raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" )
def lowerCamelCase_ ( self : int , lowerCamelCase_ : str , lowerCamelCase_ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = labels
return self.pre_processor(
[text] * len(lowerCamelCase_ ) , [f'''This example is {label}''' for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = outputs.logits
SCREAMING_SNAKE_CASE : int = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
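# --- Added usage sketch ---
# Assuming the de-obfuscated transformers name TextClassificationTool, the tool is
# a zero-shot classifier over caller-supplied labels; the first call lazily loads
# facebook/bart-large-mnli, so it needs network access:
from transformers.tools import TextClassificationTool

classifier = TextClassificationTool()
label = classifier("This new laptop is fantastic.", labels=["positive", "negative"])
print(label)  # expected: "positive"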
| 323 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__UpperCAmelCase = 0
__UpperCAmelCase = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__UpperCAmelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__UpperCAmelCase = tuple[int, int]
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Node | None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = pos_x
SCREAMING_SNAKE_CASE : Any = pos_y
SCREAMING_SNAKE_CASE : Optional[int] = (pos_y, pos_x)
SCREAMING_SNAKE_CASE : Tuple = goal_x
SCREAMING_SNAKE_CASE : List[str] = goal_y
SCREAMING_SNAKE_CASE : Optional[Any] = g_cost
SCREAMING_SNAKE_CASE : Tuple = parent
SCREAMING_SNAKE_CASE : int = self.calculate_heuristic()
SCREAMING_SNAKE_CASE : Tuple = self.g_cost + self.h_cost
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.pos_x - self.goal_x
SCREAMING_SNAKE_CASE : List[str] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowerCamelCase_ ) + abs(lowerCamelCase_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : Optional[Any] , lowerCamelCase_ : Node ):
'''simple docstring'''
return self.f_cost < other.f_cost
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : int , lowerCamelCase_ : TPosition , lowerCamelCase_ : TPosition ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = [self.start]
SCREAMING_SNAKE_CASE : list[Node] = []
SCREAMING_SNAKE_CASE : str = False
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
SCREAMING_SNAKE_CASE : Optional[Any] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowerCamelCase_ )
self.closed_nodes.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = self.get_successors(lowerCamelCase_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowerCamelCase_ )
else:
# retrieve the best current path
SCREAMING_SNAKE_CASE : int = self.open_nodes.pop(self.open_nodes.index(lowerCamelCase_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowerCamelCase_ )
else:
self.open_nodes.append(lowerCamelCase_ )
return [self.start.pos]
def lowerCamelCase_ ( self : int , lowerCamelCase_ : Node ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = []
for action in delta:
SCREAMING_SNAKE_CASE : Dict = parent.pos_x + action[1]
SCREAMING_SNAKE_CASE : List[str] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCamelCase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowerCamelCase_ , lowerCamelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCamelCase_ , ) )
return successors
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Node | None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = node
SCREAMING_SNAKE_CASE : List[str] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
SCREAMING_SNAKE_CASE : Optional[Any] = current_node.parent
path.reverse()
return path
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : int , lowerCamelCase_ : TPosition , lowerCamelCase_ : TPosition ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = AStar(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = AStar(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = False
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
SCREAMING_SNAKE_CASE : List[str] = self.fwd_astar.open_nodes.pop(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowerCamelCase_ , lowerCamelCase_ )
self.fwd_astar.closed_nodes.append(lowerCamelCase_ )
self.bwd_astar.closed_nodes.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = current_bwd_node
SCREAMING_SNAKE_CASE : Any = current_fwd_node
SCREAMING_SNAKE_CASE : Dict = {
self.fwd_astar: self.fwd_astar.get_successors(lowerCamelCase_ ),
self.bwd_astar: self.bwd_astar.get_successors(lowerCamelCase_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowerCamelCase_ )
else:
# retrieve the best current path
SCREAMING_SNAKE_CASE : int = astar.open_nodes.pop(
astar.open_nodes.index(lowerCamelCase_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowerCamelCase_ )
else:
astar.open_nodes.append(lowerCamelCase_ )
return [self.fwd_astar.start.pos]
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Node , lowerCamelCase_ : Node ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.fwd_astar.retrace_path(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.bwd_astar.retrace_path(lowerCamelCase_ )
bwd_path.pop()
bwd_path.reverse()
SCREAMING_SNAKE_CASE : str = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__UpperCAmelCase = (0, 0)
__UpperCAmelCase = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__UpperCAmelCase = time.time()
__UpperCAmelCase = AStar(init, goal)
__UpperCAmelCase = a_star.search()
__UpperCAmelCase = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
__UpperCAmelCase = time.time()
__UpperCAmelCase = BidirectionalAStar(init, goal)
__UpperCAmelCase = bd_a_star.search()
__UpperCAmelCase = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
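# Note on the HEURISTIC switch above (added): with HEURISTIC == 1 the node cost is
# the Manhattan distance |dx| + |dy|; otherwise it is the Euclidean distance
# sqrt(dx**2 + dy**2). Both are admissible for unit-cost 4-connected moves, so
# either setting keeps the returned paths optimal; Manhattan is the tighter (and
# cheaper) bound on this grid, typically expanding fewer nodes.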
| 323 | 1 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = os.path.join(args.tf_model_dir , """parameters.json""" )
SCREAMING_SNAKE_CASE : Optional[int] = json.loads(open(lowerCamelCase_ ).read() )
if not params:
raise ValueError(
f'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith(""".pt""" ):
SCREAMING_SNAKE_CASE : str = args.output + """.pt"""
SCREAMING_SNAKE_CASE : int = OrderedDict()
with tf.device("""/CPU:0""" ):
SCREAMING_SNAKE_CASE : Any = tf.train.load_checkpoint(args.tf_model_dir )
SCREAMING_SNAKE_CASE : Any = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
SCREAMING_SNAKE_CASE : int = reader.get_tensor(lowerCamelCase_ ).astype(np.floataa )
if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ):
continue
if key_name.startswith("""pasts/""" ):
if key_name.startswith("""pasts/mlp""" ):
SCREAMING_SNAKE_CASE : Optional[Any] = int(key_name[9] )
elif key_name.startswith("""pasts/out""" ):
SCREAMING_SNAKE_CASE : Dict = 8
SCREAMING_SNAKE_CASE : Optional[int] = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
SCREAMING_SNAKE_CASE : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : Any = torch.tensor(lowerCamelCase_ )
elif key_name.startswith("""model/moe""" ):
SCREAMING_SNAKE_CASE : Optional[Any] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/switch_gating/kernel""" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
SCREAMING_SNAKE_CASE : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : str = torch.tensor(lowerCamelCase_ )
elif key_name.endswith("""/softmlp/kernel""" ):
SCREAMING_SNAKE_CASE : List[str] = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
SCREAMING_SNAKE_CASE : int = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(lowerCamelCase_ )
elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ):
SCREAMING_SNAKE_CASE : int = key_name[-9:-7]
for i in range(16 ):
SCREAMING_SNAKE_CASE : Optional[Any] = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
SCREAMING_SNAKE_CASE : Dict = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(lowerCamelCase_ )
elif key_name.startswith("""model/mlp""" ):
SCREAMING_SNAKE_CASE : Optional[int] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/p1/kernel""" ):
SCREAMING_SNAKE_CASE : List[Any] = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player
SCREAMING_SNAKE_CASE : Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(lowerCamelCase_ )
elif key_name.endswith("""/p1/bias""" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player
SCREAMING_SNAKE_CASE : Tuple = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(lowerCamelCase_ )
elif key_name.endswith("""/p2/kernel""" ):
SCREAMING_SNAKE_CASE : Any = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player
SCREAMING_SNAKE_CASE : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(lowerCamelCase_ )
elif key_name.endswith("""/p2/bias""" ):
SCREAMING_SNAKE_CASE : Tuple = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player
SCREAMING_SNAKE_CASE : Optional[Any] = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : Any = torch.tensor(lowerCamelCase_ )
elif key_name.startswith("""model/ln""" ):
SCREAMING_SNAKE_CASE : Any = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
SCREAMING_SNAKE_CASE : int = """model.blocks.%d.feed_forward.norm.bias""" % player
SCREAMING_SNAKE_CASE : Union[str, Any] = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(lowerCamelCase_ )
elif key_name.endswith("""/g""" ):
SCREAMING_SNAKE_CASE : List[str] = """model.blocks.%d.feed_forward.norm.weight""" % player
SCREAMING_SNAKE_CASE : Any = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : int = torch.tensor(lowerCamelCase_ )
elif key_name.startswith("""model/att""" ):
SCREAMING_SNAKE_CASE : Optional[int] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/qkv/kernel""" ):
SCREAMING_SNAKE_CASE : Optional[int] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
SCREAMING_SNAKE_CASE : Optional[int] = state[:, 0, :, :]
SCREAMING_SNAKE_CASE : str = state[:, 1, :, :]
SCREAMING_SNAKE_CASE : List[Any] = state[:, 2, :, :]
SCREAMING_SNAKE_CASE : Optional[int] = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : Any = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : List[Any] = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : Any = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
SCREAMING_SNAKE_CASE : Any = torch.tensor(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(lowerCamelCase_ )
elif key_name.endswith("""/o/kernel""" ):
SCREAMING_SNAKE_CASE : Optional[int] = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
SCREAMING_SNAKE_CASE : Tuple = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(lowerCamelCase_ )
elif key_name.startswith("""model/an""" ):
SCREAMING_SNAKE_CASE : Optional[Any] = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
SCREAMING_SNAKE_CASE : Dict = """model.blocks.%d.self_attn.norm.bias""" % player
SCREAMING_SNAKE_CASE : Any = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(lowerCamelCase_ )
elif key_name.endswith("""/g""" ):
SCREAMING_SNAKE_CASE : List[Any] = """model.blocks.%d.self_attn.norm.weight""" % player
SCREAMING_SNAKE_CASE : int = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : str = torch.tensor(lowerCamelCase_ )
elif (
key_name.startswith("""model/wte""" )
or key_name.startswith("""model/wpe""" )
or key_name.startswith("""model/ete""" )
):
SCREAMING_SNAKE_CASE : Tuple = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
key_name[-3:]
]
SCREAMING_SNAKE_CASE : int = """model.%s.weight""" % nlayer
SCREAMING_SNAKE_CASE : List[Any] = vnp.copy() # same in embedded
SCREAMING_SNAKE_CASE : str = torch.tensor(lowerCamelCase_ )
if key_name.startswith("""model/wte""" ):
SCREAMING_SNAKE_CASE : Dict = """lm_head.weight"""
SCREAMING_SNAKE_CASE : List[Any] = vnp.copy() # same in embedded
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(lowerCamelCase_ )
elif key_name.startswith("""model/wob""" ):
SCREAMING_SNAKE_CASE : List[str] = """final_logits_bias"""
SCREAMING_SNAKE_CASE : Any = vnp.copy() # same in embedded
SCREAMING_SNAKE_CASE : Optional[int] = state.reshape((1, -1) )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(lowerCamelCase_ )
elif key_name == "model/dense/kernel":
SCREAMING_SNAKE_CASE : Any = """model.last_project.weight"""
SCREAMING_SNAKE_CASE : int = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : int = torch.tensor(lowerCamelCase_ )
elif key_name == "model/dense_1/bias":
SCREAMING_SNAKE_CASE : Any = """model.last_project.bias"""
SCREAMING_SNAKE_CASE : List[str] = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(lowerCamelCase_ )
torch.save(lowerCamelCase_ , args.output )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser(
description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
__UpperCAmelCase = parser.parse_args()
convert_tf_gptsan_to_pt(args)
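# --- Added invocation sketch ---
# The converter is a CLI over a Mesh-TensorFlow GPTSAN checkpoint directory that
# contains parameters.json (the script name and paths below are placeholders):
#   python convert_tf_gptsan_to_pt.py \
#       --tf_model_dir /path/to/gptsan_tf_checkpoint \
#       --output /path/to/gptsan_pytorch_model.pt
# The script rejects an empty parameters.json and appends ".pt" to the output
# file name when the suffix is missing.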
| 323 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''efficientnet'''
def __init__( self : Tuple , lowerCamelCase_ : int = 3 , lowerCamelCase_ : int = 6_00 , lowerCamelCase_ : float = 2.0 , lowerCamelCase_ : float = 3.1 , lowerCamelCase_ : int = 8 , lowerCamelCase_ : List[int] = [3, 3, 5, 3, 5, 5, 3] , lowerCamelCase_ : List[int] = [32, 16, 24, 40, 80, 1_12, 1_92] , lowerCamelCase_ : List[int] = [16, 24, 40, 80, 1_12, 1_92, 3_20] , lowerCamelCase_ : List[int] = [] , lowerCamelCase_ : List[int] = [1, 2, 2, 2, 1, 2, 1] , lowerCamelCase_ : List[int] = [1, 2, 2, 3, 3, 4, 1] , lowerCamelCase_ : List[int] = [1, 6, 6, 6, 6, 6, 6] , lowerCamelCase_ : float = 0.25 , lowerCamelCase_ : str = "swish" , lowerCamelCase_ : int = 25_60 , lowerCamelCase_ : str = "mean" , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : float = 0.001 , lowerCamelCase_ : float = 0.99 , lowerCamelCase_ : float = 0.5 , lowerCamelCase_ : float = 0.2 , **lowerCamelCase_ : int , ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : int = width_coefficient
SCREAMING_SNAKE_CASE : List[str] = depth_coefficient
SCREAMING_SNAKE_CASE : Optional[Any] = depth_divisor
SCREAMING_SNAKE_CASE : List[str] = kernel_sizes
SCREAMING_SNAKE_CASE : Dict = in_channels
SCREAMING_SNAKE_CASE : List[str] = out_channels
SCREAMING_SNAKE_CASE : Any = depthwise_padding
SCREAMING_SNAKE_CASE : Dict = strides
SCREAMING_SNAKE_CASE : Optional[Any] = num_block_repeats
SCREAMING_SNAKE_CASE : Any = expand_ratios
SCREAMING_SNAKE_CASE : Union[str, Any] = squeeze_expansion_ratio
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dim
SCREAMING_SNAKE_CASE : List[str] = pooling_type
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Any = batch_norm_eps
SCREAMING_SNAKE_CASE : Union[str, Any] = batch_norm_momentum
SCREAMING_SNAKE_CASE : Dict = dropout_rate
SCREAMING_SNAKE_CASE : int = drop_connect_rate
SCREAMING_SNAKE_CASE : Optional[Any] = sum(lowerCamelCase_ ) * 4
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = version.parse('''1.11''' )
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return 1e-5
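# --- Added usage sketch ---
# Assuming the de-obfuscated transformers name EfficientNetConfig, custom compound
# scaling is expressed through the coefficients (the values below approximate the
# B2 preset; treat them as illustrative):
from transformers import EfficientNetConfig

config = EfficientNetConfig(image_size=260, width_coefficient=1.1, depth_coefficient=1.2)
print(config.num_hidden_layers)  # sum(num_block_repeats) * 4 == 64 with the defaults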
| 323 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''ctrl'''
SCREAMING_SNAKE_CASE__ = ['''past_key_values''']
SCREAMING_SNAKE_CASE__ = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : List[str] , lowerCamelCase_ : List[str]=24_65_34 , lowerCamelCase_ : List[str]=2_56 , lowerCamelCase_ : Optional[int]=12_80 , lowerCamelCase_ : Dict=81_92 , lowerCamelCase_ : Union[str, Any]=48 , lowerCamelCase_ : Optional[int]=16 , lowerCamelCase_ : str=0.1 , lowerCamelCase_ : Optional[Any]=0.1 , lowerCamelCase_ : Tuple=1e-6 , lowerCamelCase_ : str=0.02 , lowerCamelCase_ : List[Any]=True , **lowerCamelCase_ : Dict , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = n_positions
SCREAMING_SNAKE_CASE : str = n_embd
SCREAMING_SNAKE_CASE : List[str] = n_layer
SCREAMING_SNAKE_CASE : Optional[Any] = n_head
SCREAMING_SNAKE_CASE : Optional[int] = dff
SCREAMING_SNAKE_CASE : Tuple = resid_pdrop
SCREAMING_SNAKE_CASE : Optional[int] = embd_pdrop
SCREAMING_SNAKE_CASE : Dict = layer_norm_epsilon
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : List[Any] = use_cache
super().__init__(**lowerCamelCase_ )
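# Usage sketch (illustrative; the class above is `CTRLConfig` in the original
# source). The `attribute_map` lets the canonical names alias the CTRL-specific ones:
#
#   config = CTRLConfig(n_embd=1280)
#   assert config.hidden_size == 1280   # resolved through attribute_map to n_embd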
| 323 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(lowercase_ )
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Tuple , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : Tuple ):
'''simple docstring'''
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Optional[int]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = {}
SCREAMING_SNAKE_CASE : List[Any] = {}
if prompt is not None:
SCREAMING_SNAKE_CASE : List[Any] = prompt
if generate_kwargs is not None:
SCREAMING_SNAKE_CASE : Optional[int] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
SCREAMING_SNAKE_CASE : Optional[Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Optional[Any] , lowerCamelCase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCamelCase_ : Any ):
'''simple docstring'''
return super().__call__(lowerCamelCase_ , **lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = load_image(lowerCamelCase_ )
if prompt is not None:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError(
f'''Received an invalid text input, got - {type(lowerCamelCase_ )} - but expected a single string. '''
"""Note also that one single text can be provided for conditional image to text generation.""" )
SCREAMING_SNAKE_CASE : Optional[int] = self.model.config.model_type
if model_type == "git":
SCREAMING_SNAKE_CASE : Dict = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE : str = self.tokenizer(text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ).input_ids
SCREAMING_SNAKE_CASE : Optional[int] = [self.tokenizer.cls_token_id] + input_ids
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
SCREAMING_SNAKE_CASE : int = self.image_processor(images=lowerCamelCase_ , header_text=lowerCamelCase_ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder is excluded here: it does not support conditional generation and falls through to the error below
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework )
model_inputs.update(lowerCamelCase_ )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
SCREAMING_SNAKE_CASE : Any = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
SCREAMING_SNAKE_CASE : Optional[Any] = None
return model_inputs
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any]=None ):
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , lowerCamelCase_ )
and all(x is None for x in model_inputs["""input_ids"""] )
):
SCREAMING_SNAKE_CASE : List[str] = None
if generate_kwargs is None:
SCREAMING_SNAKE_CASE : int = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
SCREAMING_SNAKE_CASE : Tuple = model_inputs.pop(self.model.main_input_name )
SCREAMING_SNAKE_CASE : Any = self.model.generate(lowerCamelCase_ , **lowerCamelCase_ , **lowerCamelCase_ )
return model_outputs
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for output_ids in model_outputs:
SCREAMING_SNAKE_CASE : List[Any] = {
"""generated_text""": self.tokenizer.decode(
lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , )
}
records.append(lowerCamelCase_ )
return records
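# Usage sketch (illustrative checkpoint placeholder; this mirrors the generic
# `transformers.pipeline` API rather than anything defined only in this file):
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="<caption model checkpoint>")
#   captioner("https://example.com/cat.png")
#   # -> [{"generated_text": "..."}]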
| 323 | 1 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__UpperCAmelCase = logging.getLogger(__name__)
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int=None , lowerCamelCase_ : int=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.layer[current_layer](lowerCamelCase_ , lowerCamelCase_ , head_mask[current_layer] )
SCREAMING_SNAKE_CASE : Any = layer_outputs[0]
return hidden_states
@add_start_docstrings(
'''The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.''' , lowercase_ , )
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Tuple , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
super().__init__(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = BertEncoderWithPabee(lowerCamelCase_ )
self.init_weights()
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Any = 0
SCREAMING_SNAKE_CASE : Dict = 0
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = threshold
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = patience
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : List[str] = 0
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.inference_layers_num / self.inference_instances_num
SCREAMING_SNAKE_CASE : Dict = (
f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(lowerCamelCase_ )
@add_start_docstrings_to_model_forward(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : Any=None , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : str=None , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : int=None , lowerCamelCase_ : Any=False , ):
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
SCREAMING_SNAKE_CASE : Any = input_ids.size()
elif inputs_embeds is not None:
SCREAMING_SNAKE_CASE : Optional[int] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
SCREAMING_SNAKE_CASE : Any = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
SCREAMING_SNAKE_CASE : List[Any] = torch.ones(lowerCamelCase_ , device=lowerCamelCase_ )
if token_type_ids is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros(lowerCamelCase_ , dtype=torch.long , device=lowerCamelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
SCREAMING_SNAKE_CASE : torch.Tensor = self.get_extended_attention_mask(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# If a 2D or 3D attention mask is provided for the cross-attention,
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = encoder_hidden_states.size()
SCREAMING_SNAKE_CASE : Union[str, Any] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
SCREAMING_SNAKE_CASE : int = torch.ones(lowerCamelCase_ , device=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = self.invert_attention_mask(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Any = None
# Prepare head mask if needed
# 1.0 in head_mask indicates we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
SCREAMING_SNAKE_CASE : List[str] = self.get_head_mask(lowerCamelCase_ , self.config.num_hidden_layers )
SCREAMING_SNAKE_CASE : int = self.embeddings(
input_ids=lowerCamelCase_ , position_ids=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , inputs_embeds=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = embedding_output
if self.training:
SCREAMING_SNAKE_CASE : Any = []
for i in range(self.config.num_hidden_layers ):
SCREAMING_SNAKE_CASE : Union[str, Any] = self.encoder.adaptive_forward(
lowerCamelCase_ , current_layer=lowerCamelCase_ , attention_mask=lowerCamelCase_ , head_mask=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.pooler(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = output_layers[i](output_dropout(lowerCamelCase_ ) )
res.append(lowerCamelCase_ )
elif self.patience == 0: # Use all layers for inference
SCREAMING_SNAKE_CASE : Tuple = self.encoder(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , head_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Dict = self.pooler(encoder_outputs[0] )
SCREAMING_SNAKE_CASE : List[str] = [output_layers[self.config.num_hidden_layers - 1](lowerCamelCase_ )]
else:
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
SCREAMING_SNAKE_CASE : Dict = self.encoder.adaptive_forward(
lowerCamelCase_ , current_layer=lowerCamelCase_ , attention_mask=lowerCamelCase_ , head_mask=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = self.pooler(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = output_layers[i](lowerCamelCase_ )
if regression:
SCREAMING_SNAKE_CASE : List[Any] = logits.detach()
if patient_result is not None:
SCREAMING_SNAKE_CASE : Optional[int] = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
SCREAMING_SNAKE_CASE : Any = 0
else:
SCREAMING_SNAKE_CASE : Any = logits.detach().argmax(dim=1 )
if patient_result is not None:
SCREAMING_SNAKE_CASE : int = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(lowerCamelCase_ ) ):
patient_counter += 1
else:
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : str = logits
if patient_counter == self.patience:
break
SCREAMING_SNAKE_CASE : Any = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
'''Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. ''' , lowercase_ , )
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
super().__init__(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = config.num_labels
SCREAMING_SNAKE_CASE : str = BertModelWithPabee(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
SCREAMING_SNAKE_CASE : Tuple = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int=None , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Any=None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.bert(
input_ids=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , position_ids=lowerCamelCase_ , head_mask=lowerCamelCase_ , inputs_embeds=lowerCamelCase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
SCREAMING_SNAKE_CASE : List[str] = (logits[-1],)
if labels is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : Optional[int] = 0
for ix, logits_item in enumerate(lowerCamelCase_ ):
if self.num_labels == 1:
# We are doing regression
SCREAMING_SNAKE_CASE : Union[str, Any] = MSELoss()
SCREAMING_SNAKE_CASE : List[str] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
SCREAMING_SNAKE_CASE : Tuple = CrossEntropyLoss()
SCREAMING_SNAKE_CASE : int = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
SCREAMING_SNAKE_CASE : Dict = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
SCREAMING_SNAKE_CASE : Optional[Any] = (total_loss / total_weights,) + outputs
return outputs
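# Patience-based early exit in a nutshell (standalone sketch with illustrative
# names; the inference branch of the backbone above implements the same rule):
# stop once `patience` consecutive internal classifiers agree on the argmax prediction.
#
#   patient_counter, prev = 0, None
#   for layer_logits in per_layer_logits:
#       pred = layer_logits.argmax(dim=1)
#       patient_counter = patient_counter + 1 if prev is not None and bool(pred.eq(prev).all()) else 0
#       prev = pred
#       if patient_counter == patience:
#           break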
| 323 |
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (CMStochasticIterativeScheduler,)
SCREAMING_SNAKE_CASE__ = 10
def lowerCamelCase_ ( self : List[str] , **lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = {
"""num_train_timesteps""": 2_01,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
config.update(**lowerCamelCase_ )
return config
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = 10
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0](**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = scheduler.timesteps[0]
SCREAMING_SNAKE_CASE : Dict = scheduler.timesteps[1]
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample
SCREAMING_SNAKE_CASE : List[str] = 0.1 * sample
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = 1
scheduler.set_timesteps(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = scheduler.timesteps
SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(lowerCamelCase_ ):
# 1. scale model input
SCREAMING_SNAKE_CASE : Optional[int] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict noise residual
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , lowerCamelCase_ )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = pred_prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 192.7_614 ) < 1e-2
assert abs(result_mean.item() - 0.2_510 ) < 1e-3
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = [1_06, 0]
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = scheduler.timesteps
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict noise residual
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lowerCamelCase_ )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE : str = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Dict = pred_prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 347.6_357 ) < 1e-2
assert abs(result_mean.item() - 0.4_527 ) < 1e-3
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [39, 30, 12, 15, 0]
with self.assertRaises(lowerCamelCase_ , msg="""`timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = [39, 30, 12, 1, 0]
SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ )
with self.assertRaises(lowerCamelCase_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ , timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Any = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
lowerCamelCase_ , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
| 323 | 1 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE : List[str] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = -1
SCREAMING_SNAKE_CASE : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(lowerCamelCase_ , max_new_tokens=10 , do_sample=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE : Union[str, Any] = TextStreamer(lowerCamelCase_ )
model.generate(lowerCamelCase_ , max_new_tokens=10 , do_sample=lowerCamelCase_ , streamer=lowerCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
SCREAMING_SNAKE_CASE : Dict = cs.out[:-1]
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE : Tuple = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = -1
SCREAMING_SNAKE_CASE : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = model.generate(lowerCamelCase_ , max_new_tokens=10 , do_sample=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = tokenizer.decode(greedy_ids[0] )
SCREAMING_SNAKE_CASE : Tuple = TextIteratorStreamer(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
SCREAMING_SNAKE_CASE : Optional[Any] = Thread(target=model.generate , kwargs=lowerCamelCase_ )
thread.start()
SCREAMING_SNAKE_CASE : int = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = -1
SCREAMING_SNAKE_CASE : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = model.generate(lowerCamelCase_ , max_new_tokens=10 , do_sample=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = greedy_ids[:, input_ids.shape[1] :]
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE : Any = TextStreamer(lowerCamelCase_ , skip_prompt=lowerCamelCase_ )
model.generate(lowerCamelCase_ , max_new_tokens=10 , do_sample=lowerCamelCase_ , streamer=lowerCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
SCREAMING_SNAKE_CASE : Union[str, Any] = cs.out[:-1]
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""distilgpt2""" )
SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = -1
SCREAMING_SNAKE_CASE : Any = torch.ones((1, 5) , device=lowerCamelCase_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE : Dict = TextStreamer(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
model.generate(lowerCamelCase_ , max_new_tokens=1 , do_sample=lowerCamelCase_ , streamer=lowerCamelCase_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
SCREAMING_SNAKE_CASE : List[str] = cs.out[:-1] # Remove the final "\n"
SCREAMING_SNAKE_CASE : List[Any] = tokenizer(lowerCamelCase_ , return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = -1
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = TextIteratorStreamer(lowerCamelCase_ , timeout=0.001 )
SCREAMING_SNAKE_CASE : List[str] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
SCREAMING_SNAKE_CASE : Any = Thread(target=model.generate , kwargs=lowerCamelCase_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[Any] = """"""
for new_text in streamer:
streamer_text += new_text
| 323 |
'''simple docstring'''
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : NestedDataStructureLike[PathLike] , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
lowerCamelCase_ , split=lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_ , streaming=lowerCamelCase_ , num_proc=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : int = path_or_paths if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else {self.split: path_or_paths}
SCREAMING_SNAKE_CASE : Optional[int] = Text(
cache_dir=lowerCamelCase_ , data_files=lowerCamelCase_ , features=lowerCamelCase_ , **lowerCamelCase_ , )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.streaming:
SCREAMING_SNAKE_CASE : int = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[str] = None
self.builder.download_and_prepare(
download_config=lowerCamelCase_ , download_mode=lowerCamelCase_ , verification_mode=lowerCamelCase_ , base_path=lowerCamelCase_ , num_proc=self.num_proc , )
SCREAMING_SNAKE_CASE : int = self.builder.as_dataset(
split=self.split , verification_mode=lowerCamelCase_ , in_memory=self.keep_in_memory )
return dataset
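# Usage sketch (hypothetical file path; in the original source this reader is
# `TextDatasetReader`, the backend of `Dataset.from_text` / `load_dataset("text", ...)`):
#
#   reader = TextDatasetReader("corpus.txt", keep_in_memory=True)
#   ds = reader.read()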
| 323 | 1 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def __A ( lowerCamelCase_ , lowerCamelCase_=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if token is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
SCREAMING_SNAKE_CASE : List[Any] = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
SCREAMING_SNAKE_CASE : Optional[Any] = requests.get(lowerCamelCase_ , headers=lowerCamelCase_ ).json()
SCREAMING_SNAKE_CASE : int = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
SCREAMING_SNAKE_CASE : Dict = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Dict = requests.get(url + f'''&page={i + 2}''' , headers=lowerCamelCase_ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def __A ( lowerCamelCase_ , lowerCamelCase_=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = None
if token is not None:
SCREAMING_SNAKE_CASE : Optional[int] = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
SCREAMING_SNAKE_CASE : int = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'''
SCREAMING_SNAKE_CASE : int = requests.get(lowerCamelCase_ , headers=lowerCamelCase_ ).json()
SCREAMING_SNAKE_CASE : Optional[int] = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
SCREAMING_SNAKE_CASE : Tuple = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[int] = requests.get(url + f'''&page={i + 2}''' , headers=lowerCamelCase_ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = None
if token is not None:
SCREAMING_SNAKE_CASE : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
SCREAMING_SNAKE_CASE : str = requests.get(lowerCamelCase_ , headers=lowerCamelCase_ , allow_redirects=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = result.headers["""Location"""]
SCREAMING_SNAKE_CASE : Dict = requests.get(lowerCamelCase_ , allow_redirects=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(lowerCamelCase_ , f'''{artifact_name}.zip''' )
with open(lowerCamelCase_ , """wb""" ) as fp:
fp.write(response.content )
def __A ( lowerCamelCase_ , lowerCamelCase_=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = []
SCREAMING_SNAKE_CASE : int = []
SCREAMING_SNAKE_CASE : List[str] = None
with zipfile.ZipFile(lowerCamelCase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowerCamelCase_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(lowerCamelCase_ ) as f:
for line in f:
SCREAMING_SNAKE_CASE : List[str] = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
SCREAMING_SNAKE_CASE : List[Any] = line[: line.index(""": """ )]
SCREAMING_SNAKE_CASE : List[str] = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
SCREAMING_SNAKE_CASE : Optional[Any] = line[len("""FAILED """ ) :]
failed_tests.append(lowerCamelCase_ )
elif filename == "job_name.txt":
SCREAMING_SNAKE_CASE : Any = line
if len(lowerCamelCase_ ) != len(lowerCamelCase_ ):
raise ValueError(
f'''`errors` and `failed_tests` should have the same number of elements. Got {len(lowerCamelCase_ )} for `errors` '''
f'''and {len(lowerCamelCase_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
""" problem.""" )
SCREAMING_SNAKE_CASE : Dict = None
if job_name and job_links:
SCREAMING_SNAKE_CASE : int = job_links.get(lowerCamelCase_ , lowerCamelCase_ )
# A list with elements of the form (line of error, error, failed test)
SCREAMING_SNAKE_CASE : Optional[int] = [x + [y] + [job_link] for x, y in zip(lowerCamelCase_ , lowerCamelCase_ )]
return result
def __A ( lowerCamelCase_ , lowerCamelCase_=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : Any = [os.path.join(lowerCamelCase_ , lowerCamelCase_ ) for p in os.listdir(lowerCamelCase_ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(lowerCamelCase_ , job_links=lowerCamelCase_ ) )
return errors
def __A ( lowerCamelCase_ , lowerCamelCase_=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = Counter()
counter.update([x[1] for x in logs] )
SCREAMING_SNAKE_CASE : Union[str, Any] = counter.most_common()
SCREAMING_SNAKE_CASE : Tuple = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
SCREAMING_SNAKE_CASE : List[Any] = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
SCREAMING_SNAKE_CASE : List[str] = dict(sorted(r.items() , key=lambda lowerCamelCase_ : item[1]["count"] , reverse=lowerCamelCase_ ) )
return r
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
SCREAMING_SNAKE_CASE : Optional[int] = test.split("""/""" )[2]
else:
SCREAMING_SNAKE_CASE : Tuple = None
return test
def __A ( lowerCamelCase_ , lowerCamelCase_=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = [(x[0], x[1], get_model(x[2] )) for x in logs]
SCREAMING_SNAKE_CASE : List[str] = [x for x in logs if x[2] is not None]
SCREAMING_SNAKE_CASE : List[str] = {x[2] for x in logs}
SCREAMING_SNAKE_CASE : Dict = {}
for test in tests:
SCREAMING_SNAKE_CASE : List[str] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
SCREAMING_SNAKE_CASE : int = counter.most_common()
SCREAMING_SNAKE_CASE : int = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
SCREAMING_SNAKE_CASE : str = sum(error_counts.values() )
if n_errors > 0:
SCREAMING_SNAKE_CASE : str = {"""count""": n_errors, """errors""": error_counts}
SCREAMING_SNAKE_CASE : Any = dict(sorted(r.items() , key=lambda lowerCamelCase_ : item[1]["count"] , reverse=lowerCamelCase_ ) )
return r
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = """| no. | error | status |"""
SCREAMING_SNAKE_CASE : Optional[int] = """|-:|:-|:-|"""
SCREAMING_SNAKE_CASE : Any = [header, sep]
for error in reduced_by_error:
SCREAMING_SNAKE_CASE : Optional[Any] = reduced_by_error[error]["""count"""]
SCREAMING_SNAKE_CASE : Union[str, Any] = f'''| {count} | {error[:1_00]} | |'''
lines.append(lowerCamelCase_ )
return "\n".join(lowerCamelCase_ )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = """| model | no. of errors | major error | count |"""
SCREAMING_SNAKE_CASE : Any = """|-:|-:|-:|-:|"""
SCREAMING_SNAKE_CASE : str = [header, sep]
for model in reduced_by_model:
SCREAMING_SNAKE_CASE : List[Any] = reduced_by_model[model]["""count"""]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = list(reduced_by_model[model]["""errors"""].items() )[0]
SCREAMING_SNAKE_CASE : Tuple = f'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(lowerCamelCase_ )
return "\n".join(lowerCamelCase_ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
__UpperCAmelCase = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
__UpperCAmelCase = get_job_links(args.workflow_run_id, token=args.token)
__UpperCAmelCase = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
__UpperCAmelCase = k.find(""" / """)
__UpperCAmelCase = k[index + len(""" / """) :]
__UpperCAmelCase = v
with open(os.path.join(args.output_dir, """job_links.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
__UpperCAmelCase = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
__UpperCAmelCase = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
__UpperCAmelCase = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
__UpperCAmelCase = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, """errors.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
__UpperCAmelCase = reduce_by_error(errors)
__UpperCAmelCase = reduce_by_model(errors)
__UpperCAmelCase = make_github_table(reduced_by_error)
__UpperCAmelCase = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, """reduced_by_error.txt"""), """w""", encoding="""UTF-8""") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, """reduced_by_model.txt"""), """w""", encoding="""UTF-8""") as fp:
fp.write(sa)
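# Example invocation (hypothetical run id and token; the flags are the ones
# declared by the parser above):
#
#   python <this script> --workflow_run_id 123456789 --output_dir ./ci_errors --token "$GITHUB_TOKEN"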
| 323 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = 3_84
SCREAMING_SNAKE_CASE : Union[str, Any] = 7
if "tiny" in model_name:
SCREAMING_SNAKE_CASE : List[str] = 96
SCREAMING_SNAKE_CASE : List[str] = (2, 2, 6, 2)
SCREAMING_SNAKE_CASE : List[Any] = (3, 6, 12, 24)
elif "small" in model_name:
SCREAMING_SNAKE_CASE : Any = 96
SCREAMING_SNAKE_CASE : List[str] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : int = (3, 6, 12, 24)
elif "base" in model_name:
SCREAMING_SNAKE_CASE : int = 1_28
SCREAMING_SNAKE_CASE : Any = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : int = (4, 8, 16, 32)
SCREAMING_SNAKE_CASE : Optional[Any] = 12
SCREAMING_SNAKE_CASE : str = 5_12
elif "large" in model_name:
SCREAMING_SNAKE_CASE : Tuple = 1_92
SCREAMING_SNAKE_CASE : Tuple = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : List[str] = (6, 12, 24, 48)
SCREAMING_SNAKE_CASE : Tuple = 12
SCREAMING_SNAKE_CASE : Union[str, Any] = 7_68
# set label information
SCREAMING_SNAKE_CASE : List[str] = 1_50
SCREAMING_SNAKE_CASE : Optional[Any] = """huggingface/label-files"""
SCREAMING_SNAKE_CASE : List[str] = """ade20k-id2label.json"""
SCREAMING_SNAKE_CASE : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE : str = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Optional[Any] = SwinConfig(
embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , num_heads=lowerCamelCase_ , window_size=lowerCamelCase_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
SCREAMING_SNAKE_CASE : List[str] = UperNetConfig(
backbone_config=lowerCamelCase_ , auxiliary_in_channels=lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , )
return config
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = dct.pop(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = val
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
SCREAMING_SNAKE_CASE : Dict = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE : int = in_proj_weight[:dim, :]
SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[: dim]
SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE : Any = in_proj_bias[
dim : dim * 2
]
SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[
-dim :, :
]
SCREAMING_SNAKE_CASE : str = in_proj_bias[-dim :]
# fmt: on
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = x.shape
SCREAMING_SNAKE_CASE : Any = x.reshape(lowerCamelCase_ , 4 , in_channel // 4 )
SCREAMING_SNAKE_CASE : Any = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = x.shape
SCREAMING_SNAKE_CASE : Dict = x.reshape(lowerCamelCase_ , in_channel // 4 , 4 )
SCREAMING_SNAKE_CASE : str = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = x.shape[0]
SCREAMING_SNAKE_CASE : List[str] = x.reshape(4 , in_channel // 4 )
SCREAMING_SNAKE_CASE : str = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = x.shape[0]
SCREAMING_SNAKE_CASE : Optional[int] = x.reshape(in_channel // 4 , 4 )
SCREAMING_SNAKE_CASE : str = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowerCamelCase_ )
return x
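# Worked example for the 1-D reorder helper above (illustrative): with
# in_channel = 8, reshape(2, 4) -> take columns [0, 2, 1, 3] -> transpose ->
# flatten maps
#   [0, 1, 2, 3, 4, 5, 6, 7]  ->  [0, 4, 2, 6, 1, 5, 3, 7]
# i.e. it undoes the channel interleaving that Swin's patch-merging unfold
# order introduces in the checkpoint being converted.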
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
SCREAMING_SNAKE_CASE : List[str] = model_name_to_url[model_name]
SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="""cpu""" , file_name=lowerCamelCase_ )[
"""state_dict"""
]
for name, param in state_dict.items():
print(lowerCamelCase_ , param.shape )
SCREAMING_SNAKE_CASE : Dict = get_upernet_config(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetForSemanticSegmentation(lowerCamelCase_ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(lowerCamelCase_ )
if "bn" in key:
SCREAMING_SNAKE_CASE : List[str] = key.replace("""bn""" , """batch_norm""" )
SCREAMING_SNAKE_CASE : Optional[Any] = val
# rename keys
SCREAMING_SNAKE_CASE : Union[str, Any] = create_rename_keys(lowerCamelCase_ )
for src, dest in rename_keys:
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
read_in_q_k_v(lowerCamelCase_ , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
SCREAMING_SNAKE_CASE : Tuple = reverse_correct_unfold_reduction_order(lowerCamelCase_ )
if "norm" in key:
SCREAMING_SNAKE_CASE : Optional[int] = reverse_correct_unfold_norm_order(lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
# verify on image
SCREAMING_SNAKE_CASE : Optional[int] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("""RGB""" )
SCREAMING_SNAKE_CASE : Optional[int] = SegformerImageProcessor()
SCREAMING_SNAKE_CASE : str = processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = outputs.logits
print(logits.shape )
print("""First values of logits:""" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] )
elif model_name == "upernet-swin-small":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] )
elif model_name == "upernet-swin-base":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] )
elif model_name == "upernet-swin-large":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase_ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowerCamelCase_ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCAmelCase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
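# Example invocation (hypothetical output directory; the flags are the ones
# declared by the parser above):
#
#   python <this script> --model_name upernet-swin-tiny --pytorch_dump_folder_path ./upernet-swin-tiny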
| 323 | 1 |
| 323 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class UpperCamelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Dict[str, int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int = None , lowerCamelCase_ : int = None ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Any = pad_token_id
SCREAMING_SNAKE_CASE : List[Any] = max_length
SCREAMING_SNAKE_CASE : Optional[int] = vocab
SCREAMING_SNAKE_CASE : List[Any] = merges
SCREAMING_SNAKE_CASE : Tuple = BytePairTokenizer(lowerCamelCase_ , lowerCamelCase_ , sequence_length=lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : Any , lowerCamelCase_ : GPT2Tokenizer , *lowerCamelCase_ : str , **lowerCamelCase_ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = [""" """.join(lowerCamelCase_ ) for m in tokenizer.bpe_ranks.keys()]
SCREAMING_SNAKE_CASE : List[str] = tokenizer.get_vocab()
return cls(lowerCamelCase_ , lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : List[Any] , lowerCamelCase_ : Union[str, os.PathLike] , *lowerCamelCase_ : str , **lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = GPT2Tokenizer.from_pretrained(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
return cls.from_tokenizer(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : List[str] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
return cls(**lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : int = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tf_tokenizer(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = tf.ones_like(lowerCamelCase_ )
if self.pad_token_id is not None:
# pad the tokens up to max length
SCREAMING_SNAKE_CASE : Optional[int] = max_length if max_length is not None else self.max_length
if max_length is not None:
input_ids, attention_mask = pad_model_inputs(
lowerCamelCase_ , max_seq_length=lowerCamelCase_ , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 323 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
"""configuration_megatron_bert""": ["""MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegatronBertConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegatronBertForCausalLM""",
"""MegatronBertForMaskedLM""",
"""MegatronBertForMultipleChoice""",
"""MegatronBertForNextSentencePrediction""",
"""MegatronBertForPreTraining""",
"""MegatronBertForQuestionAnswering""",
"""MegatronBertForSequenceClassification""",
"""MegatronBertForTokenClassification""",
"""MegatronBertModel""",
"""MegatronBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
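# Illustrative sketch (not part of the original file): _LazyModule defers the heavy
# torch-dependent imports above until an attribute is first accessed. A minimal
# stand-alone version of the same idea uses PEP 562 module-level __getattr__
# (the lookup table name here is hypothetical):
#
# import importlib
#
# def __getattr__(name):
#     submodule = _attr_to_submodule[name] # e.g. "modeling_megatron_bert"
#     return getattr(importlib.import_module(f".{submodule}", __package__), name)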
| 323 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase_ : Optional[NestedDataStructureLike[PathLike]] = None , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Optional[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = path_or_paths
SCREAMING_SNAKE_CASE : List[Any] = split if split or isinstance(lowerCamelCase_ , lowerCamelCase_ ) else """train"""
SCREAMING_SNAKE_CASE : List[Any] = features
SCREAMING_SNAKE_CASE : Union[str, Any] = cache_dir
SCREAMING_SNAKE_CASE : Dict = keep_in_memory
SCREAMING_SNAKE_CASE : Union[str, Any] = streaming
SCREAMING_SNAKE_CASE : Optional[int] = num_proc
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs
@abstractmethod
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
pass
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : str , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Tuple , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = features
SCREAMING_SNAKE_CASE : int = cache_dir
SCREAMING_SNAKE_CASE : Dict = keep_in_memory
SCREAMING_SNAKE_CASE : Tuple = streaming
SCREAMING_SNAKE_CASE : Union[str, Any] = num_proc
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs
@abstractmethod
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
pass
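# Illustrative sketch (assumption, not the datasets API verbatim): concrete readers
# subclass the abstract base above, keep the constructor options, and implement the
# abstract read() to return a Dataset or IterableDataset, e.g.
#
# class CsvDatasetReader(AbstractDatasetReader):
#     def read(self):
#         # build a Dataset/IterableDataset from self.path_or_paths,
#         # honoring self.streaming, self.features, self.cache_dir, self.num_proc, ...
#         ...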
| 323 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCAmelCase = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 323 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = """ylacombe/bark-small"""
SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : str = """en_speaker_1"""
SCREAMING_SNAKE_CASE : Optional[int] = """This is a test string"""
SCREAMING_SNAKE_CASE : Optional[int] = """speaker_embeddings_path.json"""
SCREAMING_SNAKE_CASE : List[Any] = """speaker_embeddings"""
def lowerCamelCase_ ( self : int , **lowerCamelCase_ : int ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[str] = BarkProcessor(tokenizer=lowerCamelCase_ )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
SCREAMING_SNAKE_CASE : int = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
SCREAMING_SNAKE_CASE : List[str] = 35
SCREAMING_SNAKE_CASE : List[Any] = 2
SCREAMING_SNAKE_CASE : int = 8
SCREAMING_SNAKE_CASE : Optional[int] = {
"""semantic_prompt""": np.ones(lowerCamelCase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
SCREAMING_SNAKE_CASE : Tuple = processor(text=self.input_string , voice_preset=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = processor(text=self.input_string , voice_preset=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
SCREAMING_SNAKE_CASE : Optional[Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Any = BarkProcessor(tokenizer=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string )
SCREAMING_SNAKE_CASE : Tuple = tokenizer(
self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
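# Note drawn from the tests above: BarkProcessor accepts a voice preset in three forms
# (an already-loaded dict, a local .npz path, or a name resolved from the hub), and all
# three end up as the same {"semantic_prompt", "coarse_prompt", "fine_prompt"} arrays
# under inputs["history_prompt"].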
| 323 | 1 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
set_seed(770)
__UpperCAmelCase = {
"""c_attn""": """att_proj""",
"""c_proj""": """out_proj""",
"""c_fc""": """in_proj""",
"""transformer.""": """""",
"""h.""": """layers.""",
"""ln_1""": """layernorm_1""",
"""ln_2""": """layernorm_2""",
"""ln_f""": """layernorm_final""",
"""wpe""": """position_embeds_layer""",
"""wte""": """input_embeds_layer""",
}
__UpperCAmelCase = {
"""text_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text.pt""",
},
"""coarse_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse.pt""",
},
"""fine_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine.pt""",
},
"""text""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text_2.pt""",
},
"""coarse""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse_2.pt""",
},
"""fine""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine_2.pt""",
},
}
__UpperCAmelCase = os.path.dirname(os.path.abspath(__file__))
__UpperCAmelCase = os.path.join(os.path.expanduser("""~"""), """.cache""")
__UpperCAmelCase = os.path.join(os.getenv("""XDG_CACHE_HOME""", default_cache_dir), """suno""", """bark_v0""")
def __A ( lowerCamelCase_ , lowerCamelCase_=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = model_type
if use_small:
key += "_small"
return os.path.join(lowerCamelCase_ , REMOTE_MODEL_PATHS[key]["""file_name"""] )
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
hf_hub_download(repo_id=lowerCamelCase_ , filename=lowerCamelCase_ , local_dir=lowerCamelCase_ )
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_="text" ):
"""simple docstring"""
if model_type == "text":
SCREAMING_SNAKE_CASE : Optional[Any] = BarkSemanticModel
SCREAMING_SNAKE_CASE : Dict = BarkSemanticConfig
SCREAMING_SNAKE_CASE : List[Any] = BarkSemanticGenerationConfig
elif model_type == "coarse":
SCREAMING_SNAKE_CASE : Any = BarkCoarseModel
SCREAMING_SNAKE_CASE : Tuple = BarkCoarseConfig
SCREAMING_SNAKE_CASE : List[str] = BarkCoarseGenerationConfig
elif model_type == "fine":
SCREAMING_SNAKE_CASE : List[Any] = BarkFineModel
SCREAMING_SNAKE_CASE : Dict = BarkFineConfig
SCREAMING_SNAKE_CASE : List[str] = BarkFineGenerationConfig
else:
raise NotImplementedError()
SCREAMING_SNAKE_CASE : str = f'''{model_type}_small''' if use_small else model_type
SCREAMING_SNAKE_CASE : Tuple = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(lowerCamelCase_ ):
logger.info(f'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
_download(model_info["""repo_id"""] , model_info["""file_name"""] )
SCREAMING_SNAKE_CASE : List[Any] = torch.load(lowerCamelCase_ , map_location=lowerCamelCase_ )
# this is a hack
SCREAMING_SNAKE_CASE : List[str] = checkpoint["""model_args"""]
if "input_vocab_size" not in model_args:
model_args["""input_vocab_size"""] = model_args["""vocab_size"""]
model_args["""output_vocab_size"""] = model_args["""vocab_size"""]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
SCREAMING_SNAKE_CASE : str = model_args.pop("""n_head""" )
SCREAMING_SNAKE_CASE : List[str] = model_args.pop("""n_embd""" )
SCREAMING_SNAKE_CASE : List[Any] = model_args.pop("""n_layer""" )
SCREAMING_SNAKE_CASE : Tuple = ConfigClass(**checkpoint["""model_args"""] )
SCREAMING_SNAKE_CASE : Union[str, Any] = ModelClass(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = GenerationConfigClass()
SCREAMING_SNAKE_CASE : List[str] = model_generation_config
SCREAMING_SNAKE_CASE : Any = checkpoint["""model"""]
# fixup checkpoint
SCREAMING_SNAKE_CASE : List[str] = """_orig_mod."""
for k, v in list(state_dict.items() ):
if k.startswith(lowerCamelCase_ ):
# replace part of the key with corresponding layer name in HF implementation
SCREAMING_SNAKE_CASE : Union[str, Any] = k[len(lowerCamelCase_ ) :]
for old_layer_name in new_layer_name_dict:
SCREAMING_SNAKE_CASE : Union[str, Any] = new_k.replace(lowerCamelCase_ , new_layer_name_dict[old_layer_name] )
SCREAMING_SNAKE_CASE : Any = state_dict.pop(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = set(state_dict.keys() ) - set(model.state_dict().keys() )
SCREAMING_SNAKE_CASE : Union[str, Any] = {k for k in extra_keys if not k.endswith(""".attn.bias""" )}
SCREAMING_SNAKE_CASE : Any = set(model.state_dict().keys() ) - set(state_dict.keys() )
SCREAMING_SNAKE_CASE : Tuple = {k for k in missing_keys if not k.endswith(""".attn.bias""" )}
if len(lowerCamelCase_ ) != 0:
raise ValueError(f'''extra keys found: {extra_keys}''' )
if len(lowerCamelCase_ ) != 0:
raise ValueError(f'''missing keys: {missing_keys}''' )
model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = model.num_parameters(exclude_embeddings=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = checkpoint["""best_val_loss"""].item()
logger.info(f'''model loaded: {round(n_params/1E6 , 1 )}M params, {round(lowerCamelCase_ , 3 )} loss''' )
model.eval()
model.to(lowerCamelCase_ )
del checkpoint, state_dict
return model
def __A ( lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_="text" ):
"""simple docstring"""
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
SCREAMING_SNAKE_CASE : str = """cpu""" # do conversion on cpu
SCREAMING_SNAKE_CASE : Optional[int] = _get_ckpt_path(lowerCamelCase_ , use_small=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = _load_model(lowerCamelCase_ , lowerCamelCase_ , model_type=lowerCamelCase_ , use_small=lowerCamelCase_ )
# load bark initial model
SCREAMING_SNAKE_CASE : List[str] = _bark_load_model(lowerCamelCase_ , """cpu""" , model_type=lowerCamelCase_ , use_small=lowerCamelCase_ )
if model_type == "text":
SCREAMING_SNAKE_CASE : Optional[int] = bark_model["""model"""]
if model.num_parameters(exclude_embeddings=lowerCamelCase_ ) != bark_model.get_num_params():
raise ValueError("""initial and new models don't have the same number of parameters""" )
# check if same output as the bark model
SCREAMING_SNAKE_CASE : Optional[Any] = 5
SCREAMING_SNAKE_CASE : Optional[int] = 10
if model_type in ["text", "coarse"]:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randint(2_56 , (batch_size, sequence_length) , dtype=torch.int )
SCREAMING_SNAKE_CASE : str = bark_model(lowerCamelCase_ )[0]
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ )
# take last logits
SCREAMING_SNAKE_CASE : List[Any] = output_new_model_total.logits[:, [-1], :]
else:
SCREAMING_SNAKE_CASE : str = 3
SCREAMING_SNAKE_CASE : List[Any] = 8
SCREAMING_SNAKE_CASE : Any = torch.randint(2_56 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = bark_model(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("""initial and new outputs don't have the same shape""" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("""initial and new outputs are not equal""" )
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = BarkSemanticConfig.from_pretrained(os.path.join(lowerCamelCase_ , """config.json""" ) )
SCREAMING_SNAKE_CASE : Optional[int] = BarkCoarseConfig.from_pretrained(os.path.join(lowerCamelCase_ , """config.json""" ) )
SCREAMING_SNAKE_CASE : Optional[int] = BarkFineConfig.from_pretrained(os.path.join(lowerCamelCase_ , """config.json""" ) )
SCREAMING_SNAKE_CASE : List[str] = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" )
SCREAMING_SNAKE_CASE : Optional[int] = BarkSemanticModel.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = BarkCoarseModel.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = BarkFineModel.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = EncodecModel.from_pretrained("""facebook/encodec_24khz""" )
SCREAMING_SNAKE_CASE : int = BarkConfig.from_sub_model_configs(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
SCREAMING_SNAKE_CASE : Tuple = BarkModel(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = semantic
SCREAMING_SNAKE_CASE : List[str] = coarseAcoustic
SCREAMING_SNAKE_CASE : List[Any] = fineAcoustic
SCREAMING_SNAKE_CASE : Tuple = codec
SCREAMING_SNAKE_CASE : Union[str, Any] = bark_generation_config
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
bark.save_pretrained(lowerCamelCase_ , repo_id=lowerCamelCase_ , push_to_hub=lowerCamelCase_ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""")
__UpperCAmelCase = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
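# Illustrative usage sketch (not part of the original script; the file name is an
# assumption): converting the small text checkpoint would look like
#   python convert_suno_to_hf.py text ./bark-text --is_small
# The text, coarse and fine checkpoints converted this way can then be assembled into
# a single Bark pipeline by the combining function defined above.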
| 323 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCAmelCase = logging.getLogger(__name__)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return (preds == labels).mean()
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def __A ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase_ )
# Set seed
set_seed(training_args.seed )
try:
SCREAMING_SNAKE_CASE : Dict = processors[data_args.task_name]()
SCREAMING_SNAKE_CASE : Optional[int] = processor.get_labels()
SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , )
# Get datasets
SCREAMING_SNAKE_CASE : Optional[Any] = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
SCREAMING_SNAKE_CASE : Dict = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(lowerCamelCase_ ) -> Dict:
SCREAMING_SNAKE_CASE : str = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(lowerCamelCase_ , p.label_ids )}
# Data collator
SCREAMING_SNAKE_CASE : List[Any] = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
SCREAMING_SNAKE_CASE : Any = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , compute_metrics=lowerCamelCase_ , data_collator=lowerCamelCase_ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE : Optional[Any] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
SCREAMING_SNAKE_CASE : Optional[Any] = trainer.evaluate()
SCREAMING_SNAKE_CASE : str = os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_master():
with open(lowerCamelCase_ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , lowerCamelCase_ , lowerCamelCase_ )
writer.write("""%s = %s\n""" % (key, value) )
results.update(lowerCamelCase_ )
return results
def __A ( lowerCamelCase_ ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
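# Illustrative launch sketch (assumption; valid task names come from
# utils_multiple_choice.processors, e.g. "swag"):
#   python run_multiple_choice.py \
#       --task_name swag \
#       --model_name_or_path bert-base-uncased \
#       --data_dir ./swag --output_dir ./out \
#       --max_seq_length 128 --do_train --do_eval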
| 323 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if depth < 0:
raise ValueError("""Depth cannot be less than 0""" )
if len(lowerCamelCase_ ) == 0:
raise ValueError("""Scores cannot be empty""" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) , minimax(depth + 1 , node_index * 2 + 1 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) , )
return min(
minimax(depth + 1 , node_index * 2 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) , minimax(depth + 1 , node_index * 2 + 1 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) , )
def __A ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
SCREAMING_SNAKE_CASE : Tuple = math.log(len(lowerCamelCase_ ) , 2 )
print("""Optimal value : """ , end="""""" )
print(minimax(0 , 0 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
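# Illustrative worked example (not part of the original file, assuming the upstream
# argument order depth, node_index, is_max, scores, height): for the 4-leaf tree
# [3, 5, 2, 9] with height log2(4) = 2, the maximizing root evaluates
#   max(min(3, 5), min(2, 9)) = max(3, 2) = 3,
# which is what minimax(0, 0, True, [3, 5, 2, 9], 2) returns.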
| 323 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Any=("DownEncoderBlock2D",) , lowerCamelCase_ : List[Any]=(64,) , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : List[Any]="silu" , lowerCamelCase_ : Optional[int]=True , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[int] = layers_per_block
SCREAMING_SNAKE_CASE : int = torch.nn.Conv2d(
lowerCamelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList([] )
# down
SCREAMING_SNAKE_CASE : Tuple = block_out_channels[0]
for i, down_block_type in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Any = output_channel
SCREAMING_SNAKE_CASE : List[str] = block_out_channels[i]
SCREAMING_SNAKE_CASE : Union[str, Any] = i == len(lowerCamelCase_ ) - 1
SCREAMING_SNAKE_CASE : Optional[Any] = get_down_block(
lowerCamelCase_ , num_layers=self.layers_per_block , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
self.down_blocks.append(lowerCamelCase_ )
# mid
SCREAMING_SNAKE_CASE : Union[str, Any] = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# out
SCREAMING_SNAKE_CASE : List[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCamelCase_ , eps=1e-6 )
SCREAMING_SNAKE_CASE : List[Any] = nn.SiLU()
SCREAMING_SNAKE_CASE : Dict = 2 * out_channels if double_z else out_channels
SCREAMING_SNAKE_CASE : List[Any] = nn.Conv2d(block_out_channels[-1] , lowerCamelCase_ , 3 , padding=1 )
SCREAMING_SNAKE_CASE : Tuple = False
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = x
SCREAMING_SNAKE_CASE : int = self.conv_in(lowerCamelCase_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase_ : List[Any] ):
def custom_forward(*lowerCamelCase_ : List[str] ):
return module(*lowerCamelCase_ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
else:
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCamelCase_ )
else:
# down
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : Tuple = down_block(lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : List[Any] = self.mid_block(lowerCamelCase_ )
# post-process
SCREAMING_SNAKE_CASE : Optional[Any] = self.conv_norm_out(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.conv_act(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.conv_out(lowerCamelCase_ )
return sample
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : str=("UpDecoderBlock2D",) , lowerCamelCase_ : Union[str, Any]=(64,) , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : Dict="silu" , lowerCamelCase_ : Any="group" , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : int = layers_per_block
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Conv2d(
lowerCamelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Any = nn.ModuleList([] )
SCREAMING_SNAKE_CASE : str = in_channels if norm_type == """spatial""" else None
# mid
SCREAMING_SNAKE_CASE : Dict = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# up
SCREAMING_SNAKE_CASE : Union[str, Any] = list(reversed(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Any = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = output_channel
SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i]
SCREAMING_SNAKE_CASE : List[str] = i == len(lowerCamelCase_ ) - 1
SCREAMING_SNAKE_CASE : List[Any] = get_up_block(
lowerCamelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , prev_output_channel=lowerCamelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , resnet_time_scale_shift=lowerCamelCase_ , )
self.up_blocks.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = output_channel
# out
if norm_type == "spatial":
SCREAMING_SNAKE_CASE : List[Any] = SpatialNorm(block_out_channels[0] , lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCamelCase_ , eps=1e-6 )
SCREAMING_SNAKE_CASE : Dict = nn.SiLU()
SCREAMING_SNAKE_CASE : str = nn.Conv2d(block_out_channels[0] , lowerCamelCase_ , 3 , padding=1 )
SCREAMING_SNAKE_CASE : Dict = False
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : str=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = z
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_in(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase_ : List[str] ):
def custom_forward(*lowerCamelCase_ : str ):
return module(*lowerCamelCase_ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
SCREAMING_SNAKE_CASE : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
else:
# middle
SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ )
else:
# middle
SCREAMING_SNAKE_CASE : Any = self.mid_block(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Any = up_block(lowerCamelCase_ , lowerCamelCase_ )
# post-process
if latent_embeds is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_norm_out(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_act(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.conv_out(lowerCamelCase_ )
return sample
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int=None , lowerCamelCase_ : Any="random" , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[Any]=True ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Tuple = n_e
SCREAMING_SNAKE_CASE : int = vq_embed_dim
SCREAMING_SNAKE_CASE : Tuple = beta
SCREAMING_SNAKE_CASE : Union[str, Any] = legacy
SCREAMING_SNAKE_CASE : int = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
SCREAMING_SNAKE_CASE : Optional[Any] = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
SCREAMING_SNAKE_CASE : Tuple = self.used.shape[0]
SCREAMING_SNAKE_CASE : Any = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
SCREAMING_SNAKE_CASE : Union[str, Any] = self.re_embed
SCREAMING_SNAKE_CASE : Any = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
SCREAMING_SNAKE_CASE : Optional[int] = n_e
SCREAMING_SNAKE_CASE : Any = sane_index_shape
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = inds.shape
assert len(lowerCamelCase_ ) > 1
SCREAMING_SNAKE_CASE : Tuple = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long()
SCREAMING_SNAKE_CASE : Union[str, Any] = match.argmax(-1 )
SCREAMING_SNAKE_CASE : Tuple = match.sum(2 ) < 1
if self.unknown_index == "random":
SCREAMING_SNAKE_CASE : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
SCREAMING_SNAKE_CASE : Any = self.unknown_index
return new.reshape(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = inds.shape
assert len(lowerCamelCase_ ) > 1
SCREAMING_SNAKE_CASE : str = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ )
if self.re_embed > self.used.shape[0]: # extra token
SCREAMING_SNAKE_CASE : List[Any] = 0 # simply set to zero
SCREAMING_SNAKE_CASE : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCamelCase_ )
return back.reshape(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous()
SCREAMING_SNAKE_CASE : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
SCREAMING_SNAKE_CASE : Any = torch.argmin(torch.cdist(lowerCamelCase_ , self.embedding.weight ) , dim=1 )
SCREAMING_SNAKE_CASE : Tuple = self.embedding(lowerCamelCase_ ).view(z.shape )
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : List[str] = None
# compute loss for embedding
if not self.legacy:
SCREAMING_SNAKE_CASE : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
SCREAMING_SNAKE_CASE : Tuple = z + (z_q - z).detach()
# reshape back to match original input shape
SCREAMING_SNAKE_CASE : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE : List[Any] = self.remap_to_used(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
if self.remap is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE : List[Any] = self.unmap_to_all(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
SCREAMING_SNAKE_CASE : str = self.embedding(lowerCamelCase_ )
if shape is not None:
SCREAMING_SNAKE_CASE : List[str] = z_q.view(lowerCamelCase_ )
# reshape back to match original input shape
SCREAMING_SNAKE_CASE : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int]=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parameters
self.mean, self.logvar = torch.chunk(lowerCamelCase_ , 2 , dim=1 )
SCREAMING_SNAKE_CASE : List[str] = torch.clamp(self.logvar , -30.0 , 20.0 )
SCREAMING_SNAKE_CASE : Dict = deterministic
SCREAMING_SNAKE_CASE : int = torch.exp(0.5 * self.logvar )
SCREAMING_SNAKE_CASE : Tuple = torch.exp(self.logvar )
if self.deterministic:
SCREAMING_SNAKE_CASE : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[torch.Generator] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = randn_tensor(
self.mean.shape , generator=lowerCamelCase_ , device=self.parameters.device , dtype=self.parameters.dtype )
SCREAMING_SNAKE_CASE : Optional[Any] = self.mean + self.std * sample
return x
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int=None ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=[1, 2, 3] ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
SCREAMING_SNAKE_CASE : List[Any] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.mean
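# Illustrative sketch (not part of the original module): with other=None, the kl()
# method above computes the closed-form KL divergence of N(mean, var) against N(0, I),
#   0.5 * sum(mean^2 + var - 1 - logvar).
# A standalone check with a flat sum instead of the per-sample dims [1, 2, 3]:
import torch

mean, logvar = torch.zeros(1, 4), torch.zeros(1, 4) # N(0, I) compared against itself
kl = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar)
print(kl.item()) # -> 0.0 for identical distributions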
| 323 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""",
"""facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''xlm-roberta-xl'''
def __init__( self : Dict , lowerCamelCase_ : List[str]=25_08_80 , lowerCamelCase_ : Optional[Any]=25_60 , lowerCamelCase_ : Any=36 , lowerCamelCase_ : Dict=32 , lowerCamelCase_ : List[str]=1_02_40 , lowerCamelCase_ : Optional[int]="gelu" , lowerCamelCase_ : Dict=0.1 , lowerCamelCase_ : Optional[Any]=0.1 , lowerCamelCase_ : Dict=5_14 , lowerCamelCase_ : Any=1 , lowerCamelCase_ : Optional[int]=0.02 , lowerCamelCase_ : Optional[int]=1e-05 , lowerCamelCase_ : List[Any]=1 , lowerCamelCase_ : List[str]=0 , lowerCamelCase_ : Tuple=2 , lowerCamelCase_ : Optional[int]="absolute" , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : int=None , **lowerCamelCase_ : List[str] , ):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Union[str, Any] = position_embedding_type
SCREAMING_SNAKE_CASE : List[str] = use_cache
SCREAMING_SNAKE_CASE : Dict = classifier_dropout
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : List[str] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 323 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaxAutoencoderKL
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : str = 3
SCREAMING_SNAKE_CASE : List[Any] = (32, 32)
SCREAMING_SNAKE_CASE : Tuple = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE : Any = jax.random.uniform(lowerCamelCase_ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_input
return init_dict, inputs_dict
| 323 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''longformer'''
def __init__( self : Tuple , lowerCamelCase_ : Union[List[int], int] = 5_12 , lowerCamelCase_ : int = 2 , lowerCamelCase_ : int = 1 , lowerCamelCase_ : int = 0 , lowerCamelCase_ : int = 2 , lowerCamelCase_ : int = 3_05_22 , lowerCamelCase_ : int = 7_68 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : int = 30_72 , lowerCamelCase_ : str = "gelu" , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 2 , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : float = 1e-12 , lowerCamelCase_ : bool = False , **lowerCamelCase_ : Optional[int] , ):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = attention_window
SCREAMING_SNAKE_CASE : Optional[int] = sep_token_id
SCREAMING_SNAKE_CASE : Union[str, Any] = bos_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = eos_token_id
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : List[str] = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : int = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[Any] = onnx_export
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Tuple , lowerCamelCase_ : "PretrainedConfig" , lowerCamelCase_ : str = "default" , lowerCamelCase_ : "List[PatchingSpec]" = None ):
'''simple docstring'''
super().__init__(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = True
@property
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Tuple = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE : List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""global_attention_mask""", dynamic_axis),
] )
@property
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super().outputs
if self.task == "default":
SCREAMING_SNAKE_CASE : str = {0: """batch"""}
return outputs
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return 1e-4
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return max(super().default_onnx_opset , 14 )
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : "PreTrainedTokenizerBase" , lowerCamelCase_ : int = -1 , lowerCamelCase_ : int = -1 , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[TensorType] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = super().generate_dummy_inputs(
preprocessor=lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
inputs["""global_attention_mask"""] = torch.zeros_like(inputs["""input_ids"""] )
# make every second token global
inputs["""global_attention_mask"""][:, ::2] = 1
return inputs
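# Illustrative sketch (not part of the original file): the dummy-input hook above
# zeroes a global attention mask and then flags every second token as global:
import torch

global_attention_mask = torch.zeros(1, 8, dtype=torch.long)
global_attention_mask[:, ::2] = 1 # tokens 0, 2, 4, 6 attend globally
print(global_attention_mask.tolist()) # [[1, 0, 1, 0, 1, 0, 1, 0]]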
| 323 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase__ :
"""simple docstring"""
@staticmethod
def lowerCamelCase_ ( *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
pass
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = hashlib.md5(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = DepthEstimationPipeline(model=lowerCamelCase_ , image_processor=lowerCamelCase_ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , lowerCamelCase_ )
import datasets
SCREAMING_SNAKE_CASE : List[str] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
SCREAMING_SNAKE_CASE : Any = depth_estimator(
[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
] )
self.assertEqual(
[
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
] , lowerCamelCase_ , )
@require_tf
@unittest.skip("""Depth estimation is not implemented in TF""" )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@slow
@require_torch
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = """Intel/dpt-large"""
SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline("""depth-estimation""" , model=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
SCREAMING_SNAKE_CASE : str = hashimage(outputs["""depth"""] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.662 )
@require_torch
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.skipTest("""There is no hf-internal-testing tiny model for either GLPN or DPT""" )
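Outside the test harness, the slow test above reduces to a direct pipeline call; a sketch assuming network access and the torch backend:

from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
# outputs["depth"] is a PIL image, outputs["predicted_depth"] a torch.Tensor
print(outputs["predicted_depth"].max().item())  # ~29.304 per the assertion above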
| 323 | 1 |
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
__UpperCAmelCase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def __A ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.dirname(os.path.realpath(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = os.path.join(lowerCamelCase_ , """words.txt""" )
SCREAMING_SNAKE_CASE : int = """"""
with open(lowerCamelCase_ ) as f:
SCREAMING_SNAKE_CASE : Union[str, Any] = f.readline()
SCREAMING_SNAKE_CASE : Optional[Any] = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )]
SCREAMING_SNAKE_CASE : str = [
word
for word in [sum(ord(lowerCamelCase_ ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(lowerCamelCase_ )
if __name__ == "__main__":
print(solution())
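A worked instance of the scoring rule the comprehension applies, using the example word from the original Project Euler 42 statement:

TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
score = sum(ord(c) - 64 for c in "SKY")  # S=19, K=11, Y=25
assert score == 55 and score in TRIANGULAR_NUMBERS  # 55 is the 10th triangular number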
| 323 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=13 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Dict=2_24 , lowerCamelCase_ : List[Any]=30 , lowerCamelCase_ : Union[str, Any]=4_00 , lowerCamelCase_ : str=True , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 18}
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : Any = num_channels
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : Dict = min_resolution
SCREAMING_SNAKE_CASE : List[str] = max_resolution
SCREAMING_SNAKE_CASE : str = do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size
SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE : List[Any] = image_mean
SCREAMING_SNAKE_CASE : str = image_std
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ViTImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = EfficientFormerImageProcessorTester(self )
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : List[str] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Tuple = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
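A minimal sketch of what these shape assertions exercise, assuming torch and Pillow are installed; the 18x18 size mirrors the tester default above:

import numpy as np
from transformers import ViTImageProcessor

processor = ViTImageProcessor(do_resize=True, size={"height": 18, "width": 18})
image = np.random.randint(0, 256, (3, 30, 40), dtype=np.uint8)  # CHW uint8
pixel_values = processor(image, return_tensors="pt").pixel_values
assert tuple(pixel_values.shape) == (1, 3, 18, 18)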
| 323 | 1 |
'''simple docstring'''
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = TaConfig.from_json_file(lowerCamelCase_ )
print(f'''Building PyTorch model from configuration: {config}''' )
SCREAMING_SNAKE_CASE : Optional[Any] = TaForConditionalGeneration(lowerCamelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_ta(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__UpperCAmelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
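The argparse block implies an invocation of the following shape; the script name and paths are placeholders, not real checkpoints:

# python convert_t5_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/t5/model.ckpt \
#     --config_file /path/to/t5/config.json \
#     --pytorch_dump_path /path/to/output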
| 323 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__UpperCAmelCase = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
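Assuming a standard transformers install, the lazy structure above means importing the package costs nothing until an attribute is touched:

from transformers import onnx     # cheap: no submodule is imported yet
config_cls = onnx.OnnxConfig      # first attribute access triggers the real import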
| 323 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
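The three mangled `42` fields in the dataclass above stand for annotated attributes; a sketch of what the output type plausibly looks like (the field names are an informed guess about the DeepFloyd IF output, not taken from this snippet):

from dataclasses import dataclass
from typing import List, Optional

import PIL.Image
from diffusers.utils import BaseOutput

@dataclass
class IFPipelineOutputSketch(BaseOutput):
    images: List[PIL.Image.Image]            # guessed field name
    nsfw_detected: Optional[List[bool]]      # guessed field name
    watermark_detected: Optional[List[bool]] # guessed field name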
| 323 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 323 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''encoder-decoder'''
SCREAMING_SNAKE_CASE__ = True
def __init__( self : Optional[Any] , **lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
SCREAMING_SNAKE_CASE : List[str] = kwargs.pop("""encoder""" )
SCREAMING_SNAKE_CASE : Dict = encoder_config.pop("""model_type""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop("""decoder""" )
SCREAMING_SNAKE_CASE : Dict = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
SCREAMING_SNAKE_CASE : str = AutoConfig.for_model(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.for_model(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = True
@classmethod
def lowerCamelCase_ ( cls : Dict , lowerCamelCase_ : PretrainedConfig , lowerCamelCase_ : PretrainedConfig , **lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : str = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : Any = self.encoder.to_dict()
SCREAMING_SNAKE_CASE : Any = self.decoder.to_dict()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.__class__.model_type
return output
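A sketch of the classmethod in use; the encoder and decoder model types are illustrative choices, and the asserted flags are exactly what the classmethod above sets:

from transformers import AutoConfig, EncoderDecoderConfig

encoder_cfg = AutoConfig.for_model("bert")
decoder_cfg = AutoConfig.for_model("gpt2")
cfg = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)
assert cfg.decoder.is_decoder and cfg.decoder.add_cross_attention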
| 323 |
'''simple docstring'''
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return number | (1 << position)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return number & ~(1 << position)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return number ^ (1 << position)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return ((number >> position) & 1) == 1
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
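Worked values for the five helpers, written as inline expressions because the definitions above all share the mangled name __A; the conventional names (set_bit, clear_bit, flip_bit, is_bit_set, get_bit) appear only in the comments:

assert 0b1101 | (1 << 1) == 0b1111          # set_bit(13, 1)   -> 15
assert 0b1101 & ~(1 << 2) == 0b1001         # clear_bit(13, 2) -> 9
assert 0b1101 ^ (1 << 0) == 0b1100          # flip_bit(13, 0)  -> 12
assert ((0b1101 >> 3) & 1) == 1             # is_bit_set(13, 3) -> True
assert int((0b1101 & (1 << 1)) != 0) == 0   # get_bit(13, 1)   -> 0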
| 323 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=8 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
SCREAMING_SNAKE_CASE : List[str] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def __A ( lowerCamelCase_ , lowerCamelCase_=5_12 , lowerCamelCase_=5_12 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
SCREAMING_SNAKE_CASE : Optional[int] = np.array(pil_image.convert("""RGB""" ) )
SCREAMING_SNAKE_CASE : int = arr.astype(np.floataa ) / 127.5 - 1
SCREAMING_SNAKE_CASE : Optional[int] = np.transpose(lowerCamelCase_ , [2, 0, 1] )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.from_numpy(lowerCamelCase_ ).unsqueeze(0 )
return image
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : List[str] , lowerCamelCase_ : UNetaDConditionModel , lowerCamelCase_ : DDPMScheduler , lowerCamelCase_ : VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , movq=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Optional[int] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = min(int(num_inference_steps * strength ) , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = max(num_inference_steps - init_timestep , 0 )
SCREAMING_SNAKE_CASE : int = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any]=None ):
'''simple docstring'''
if not isinstance(lowerCamelCase_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowerCamelCase_ )}''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = image.to(device=lowerCamelCase_ , dtype=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
SCREAMING_SNAKE_CASE : str = image
else:
if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
elif isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Any = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowerCamelCase_ )
]
SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(lowerCamelCase_ , dim=0 )
else:
SCREAMING_SNAKE_CASE : List[str] = self.movq.encode(lowerCamelCase_ ).latent_dist.sample(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = self.movq.config.scaling_factor * init_latents
SCREAMING_SNAKE_CASE : List[str] = torch.cat([init_latents] , dim=0 )
SCREAMING_SNAKE_CASE : Tuple = init_latents.shape
SCREAMING_SNAKE_CASE : Union[str, Any] = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ , device=lowerCamelCase_ , dtype=lowerCamelCase_ )
# get latents
SCREAMING_SNAKE_CASE : Dict = self.scheduler.add_noise(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = init_latents
return latents
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[Any]=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
SCREAMING_SNAKE_CASE : Dict = torch.device(f'''cuda:{gpu_id}''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Tuple=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=lowerCamelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
SCREAMING_SNAKE_CASE : Any = None
for cpu_offloaded_model in [self.unet, self.movq]:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = cpu_offload_with_hook(lowerCamelCase_ , lowerCamelCase_ , prev_module_hook=lowerCamelCase_ )
# We'll offload the last model manually.
SCREAMING_SNAKE_CASE : int = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCamelCase_ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCamelCase_ )
def __call__( self : Dict , lowerCamelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase_ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , lowerCamelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 1_00 , lowerCamelCase_ : float = 4.0 , lowerCamelCase_ : float = 0.3 , lowerCamelCase_ : int = 1 , lowerCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase_ : Optional[str] = "pil" , lowerCamelCase_ : bool = True , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self._execution_device
SCREAMING_SNAKE_CASE : Union[str, Any] = guidance_scale > 1.0
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(lowerCamelCase_ , dim=0 )
SCREAMING_SNAKE_CASE : List[Any] = image_embeds.shape[0]
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(lowerCamelCase_ , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE : List[str] = image_embeds.repeat_interleave(lowerCamelCase_ , dim=0 )
SCREAMING_SNAKE_CASE : Any = negative_image_embeds.repeat_interleave(lowerCamelCase_ , dim=0 )
SCREAMING_SNAKE_CASE : Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase_ )
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[int] = [image]
if not all(isinstance(lowerCamelCase_ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f'''Input is in incorrect format: {[type(lowerCamelCase_ ) for i in image]}. Currently, we only support PIL image and pytorch tensor''' )
SCREAMING_SNAKE_CASE : List[str] = torch.cat([prepare_image(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) for i in image] , dim=0 )
SCREAMING_SNAKE_CASE : List[str] = image.to(dtype=image_embeds.dtype , device=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.movq.encode(lowerCamelCase_ )["""latents"""]
SCREAMING_SNAKE_CASE : Optional[Any] = latents.repeat_interleave(lowerCamelCase_ , dim=0 )
self.scheduler.set_timesteps(lowerCamelCase_ , device=lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_timesteps(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = downscale_height_and_width(lowerCamelCase_ , lowerCamelCase_ , self.movq_scale_factor )
SCREAMING_SNAKE_CASE : Dict = self.prepare_latents(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , image_embeds.dtype , lowerCamelCase_ , lowerCamelCase_ )
for i, t in enumerate(self.progress_bar(lowerCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE : Dict = {"""image_embeds""": image_embeds}
SCREAMING_SNAKE_CASE : List[Any] = self.unet(
sample=lowerCamelCase_ , timestep=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , added_cond_kwargs=lowerCamelCase_ , return_dict=lowerCamelCase_ , )[0]
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = variance_pred.chunk(2 )
SCREAMING_SNAKE_CASE : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
SCREAMING_SNAKE_CASE : str = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.step(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ , )[0]
# post-processing
SCREAMING_SNAKE_CASE : Tuple = self.movq.decode(lowerCamelCase_ , force_not_quantize=lowerCamelCase_ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
SCREAMING_SNAKE_CASE : Optional[Any] = image * 0.5 + 0.5
SCREAMING_SNAKE_CASE : Optional[Any] = image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE : List[str] = self.numpy_to_pil(lowerCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase_ )
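The size helper at the top of this file rounds the requested resolution up to a multiple of scale_factor**2 and divides by scale_factor; a self-contained restatement with two worked sizes:

def downscale_height_and_width(height, width, scale_factor=8):
    new_h = height // scale_factor**2 + (height % scale_factor**2 != 0)
    new_w = width // scale_factor**2 + (width % scale_factor**2 != 0)
    return new_h * scale_factor, new_w * scale_factor

assert downscale_height_and_width(512, 512) == (64, 64)  # 512 // 64 = 8 -> 8 * 8
assert downscale_height_and_width(600, 512) == (80, 64)  # 600 rounds up to 10 * 8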
| 323 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=14 , lowerCamelCase_ : Optional[Any]=7 , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=True , lowerCamelCase_ : str=False , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : int=99 , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : int=4 , lowerCamelCase_ : List[Any]=4 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : Union[str, Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : List[str]=5_12 , lowerCamelCase_ : Union[str, Any]=0.02 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : Optional[int] = batch_size
SCREAMING_SNAKE_CASE : Any = seq_length
SCREAMING_SNAKE_CASE : List[str] = is_training
SCREAMING_SNAKE_CASE : Optional[int] = use_input_mask
SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = rotary_dim
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Dict = vocab_size - 1
SCREAMING_SNAKE_CASE : str = vocab_size - 1
SCREAMING_SNAKE_CASE : List[Any] = vocab_size - 1
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : List[str] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = config_and_inputs
SCREAMING_SNAKE_CASE : Tuple = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = 20
SCREAMING_SNAKE_CASE : Any = model_class_name(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model.init_cache(input_ids.shape[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
SCREAMING_SNAKE_CASE : Optional[int] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE : Any = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE : str = model(
input_ids[:, -1:] , attention_mask=lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = 20
SCREAMING_SNAKE_CASE : Dict = model_class_name(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
SCREAMING_SNAKE_CASE : str = model.init_cache(input_ids.shape[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE : Any = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE : Dict = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class UpperCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
SCREAMING_SNAKE_CASE__ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxGPTJModelTester(self )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
@tooslow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : Optional[Any] = model.config.eos_token_id
SCREAMING_SNAKE_CASE : str = jax.jit(model.generate )
SCREAMING_SNAKE_CASE : str = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@is_pt_flax_cross_test
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Optional[int] = 1
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
SCREAMING_SNAKE_CASE : Optional[int] = pt_model_class(lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = fx_state
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = pt_model(**lowerCamelCase_ ).to_tuple()
SCREAMING_SNAKE_CASE : Any = fx_model(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model_class.from_pretrained(lowerCamelCase_ , from_pt=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = fx_model_loaded(**lowerCamelCase_ ).to_tuple()
self.assertEqual(
len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = pt_model_class(lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : List[Any] = load_flax_weights_in_pytorch_model(lowerCamelCase_ , fx_model.params )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
SCREAMING_SNAKE_CASE : Dict = 1
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : Tuple = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = pt_model(**lowerCamelCase_ ).to_tuple()
SCREAMING_SNAKE_CASE : Optional[Any] = fx_model(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = pt_model_class.from_pretrained(lowerCamelCase_ , from_flax=lowerCamelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = pt_model_loaded(**lowerCamelCase_ ).to_tuple()
self.assertEqual(
len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
SCREAMING_SNAKE_CASE : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase_ )
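Both cache checks above reduce to one tolerance pattern; a stand-in with random arrays showing the exact comparison, since the model outputs are tuples whose first element is the logits tensor:

import numpy as np

full = (np.random.rand(1, 4, 16),)   # stand-in for model(input_ids) outputs
cached = (full[0] + 1e-5,)           # stand-in for the incremental cached pass
diff = np.max(np.abs(cached[0][:, -1, :5] - full[0][:, -1, :5]))
assert diff < 1e-3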
| 323 | 1 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = jnp.floataa
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : List[Any] , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = hidden_states.shape
SCREAMING_SNAKE_CASE : Union[str, Any] = jax.image.resize(
lowerCamelCase_ , shape=(batch, height * 2, width * 2, channels) , method="""nearest""" , )
SCREAMING_SNAKE_CASE : List[Any] = self.conv(lowerCamelCase_ )
return hidden_states
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = jnp.floataa
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : str , lowerCamelCase_ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.conv(lowerCamelCase_ )
return hidden_states
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = 0.0
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = jnp.floataa
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.in_channels if self.out_channels is None else self.out_channels
SCREAMING_SNAKE_CASE : Dict = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
SCREAMING_SNAKE_CASE : Dict = nn.Conv(
lowerCamelCase_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
SCREAMING_SNAKE_CASE : Dict = nn.Dense(lowerCamelCase_ , dtype=self.dtype )
SCREAMING_SNAKE_CASE : Dict = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Dropout(self.dropout_prob )
SCREAMING_SNAKE_CASE : List[Any] = nn.Conv(
lowerCamelCase_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
SCREAMING_SNAKE_CASE : Any = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
SCREAMING_SNAKE_CASE : Dict = None
if use_nin_shortcut:
SCREAMING_SNAKE_CASE : str = nn.Conv(
lowerCamelCase_ , kernel_size=(1, 1) , strides=(1, 1) , padding="""VALID""" , dtype=self.dtype , )
def __call__( self : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Union[str, Any]=True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = hidden_states
SCREAMING_SNAKE_CASE : Union[str, Any] = self.norma(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = nn.swish(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = self.conva(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.time_emb_proj(nn.swish(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.expand_dims(jnp.expand_dims(lowerCamelCase_ , 1 ) , 1 )
SCREAMING_SNAKE_CASE : Tuple = hidden_states + temb
SCREAMING_SNAKE_CASE : Union[str, Any] = self.norma(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = nn.swish(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = self.dropout(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.conva(lowerCamelCase_ )
if self.conv_shortcut is not None:
SCREAMING_SNAKE_CASE : int = self.conv_shortcut(lowerCamelCase_ )
return hidden_states + residual
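A shape check of the nearest-neighbor upsampling used above; these Flax blocks operate on NHWC tensors, so height and width double while batch and channels are untouched:

import jax
import jax.numpy as jnp

x = jnp.ones((1, 4, 4, 8))  # NHWC
up = jax.image.resize(x, shape=(1, 8, 8, 8), method="nearest")
assert up.shape == (1, 8, 8, 8)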
| 323 |
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__ ( lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [R'''h\.\d+\.attn\.bias''', R'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : int = 5_02_57 , lowerCamelCase_ : int = 10_24 , lowerCamelCase_ : int = 7_68 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : str = "gelu_new" , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 1e-5 , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[int] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'''
f''' `n_embd`: {n_embd} are not equal.''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = prefix_inner_dim
SCREAMING_SNAKE_CASE : List[str] = prefix_hidden_dim
SCREAMING_SNAKE_CASE : Tuple = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
SCREAMING_SNAKE_CASE : str = (
nn.Linear(self.prefix_hidden_dim , lowerCamelCase_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
SCREAMING_SNAKE_CASE : Any = GPTaConfig(
vocab_size=lowerCamelCase_ , n_positions=lowerCamelCase_ , n_embd=lowerCamelCase_ , n_layer=lowerCamelCase_ , n_head=lowerCamelCase_ , n_inner=lowerCamelCase_ , activation_function=lowerCamelCase_ , resid_pdrop=lowerCamelCase_ , embd_pdrop=lowerCamelCase_ , attn_pdrop=lowerCamelCase_ , layer_norm_epsilon=lowerCamelCase_ , initializer_range=lowerCamelCase_ , scale_attn_weights=lowerCamelCase_ , use_cache=lowerCamelCase_ , scale_attn_by_inverse_layer_idx=lowerCamelCase_ , reorder_and_upcast_attn=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaLMHeadModel(lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : Optional[torch.Tensor] = None , lowerCamelCase_ : Optional[torch.Tensor] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.transformer.transformer.wte(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.encode_prefix(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.decode_prefix(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
SCREAMING_SNAKE_CASE : Dict = torch.cat((dummy_token, input_ids) , dim=1 )
SCREAMING_SNAKE_CASE : str = self.transformer(inputs_embeds=lowerCamelCase_ , labels=lowerCamelCase_ , attention_mask=lowerCamelCase_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : torch.device ):
'''simple docstring'''
return torch.zeros(lowerCamelCase_ , self.prefix_length , dtype=torch.intaa , device=lowerCamelCase_ )
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
return self.encode_prefix(lowerCamelCase_ )
@torch.no_grad()
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = torch.split(lowerCamelCase_ , 1 , dim=0 )
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : Tuple = []
for feature in features:
SCREAMING_SNAKE_CASE : Optional[int] = self.decode_prefix(feature.to(lowerCamelCase_ ) ) # back to the clip feature
# Only support beam search for now
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.generate_beam(
input_embeds=lowerCamelCase_ , device=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = torch.stack(lowerCamelCase_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : int=None , lowerCamelCase_ : int = 5 , lowerCamelCase_ : int = 67 , lowerCamelCase_ : float = 1.0 , lowerCamelCase_ : Optional[int] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = eos_token_id
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.int )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.bool )
if input_embeds is not None:
SCREAMING_SNAKE_CASE : Dict = input_embeds
else:
SCREAMING_SNAKE_CASE : Dict = self.transformer.transformer.wte(lowerCamelCase_ )
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[int] = self.transformer(inputs_embeds=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = outputs.logits
SCREAMING_SNAKE_CASE : Optional[int] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
SCREAMING_SNAKE_CASE : Any = logits.softmax(-1 ).log()
if scores is None:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = logits.topk(lowerCamelCase_ , -1 )
SCREAMING_SNAKE_CASE : Optional[Any] = generated.expand(lowerCamelCase_ , *generated.shape[1:] )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
SCREAMING_SNAKE_CASE : List[Any] = next_tokens
else:
SCREAMING_SNAKE_CASE : Dict = tokens.expand(lowerCamelCase_ , *tokens.shape[1:] )
SCREAMING_SNAKE_CASE : str = torch.cat((tokens, next_tokens) , dim=1 )
else:
SCREAMING_SNAKE_CASE : Tuple = -float(np.inf )
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Dict = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
SCREAMING_SNAKE_CASE : List[str] = scores_sum / seq_lengths[:, None]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = scores_sum_average.view(-1 ).topk(lowerCamelCase_ , -1 )
SCREAMING_SNAKE_CASE : str = next_tokens // scores_sum.shape[1]
SCREAMING_SNAKE_CASE : Tuple = seq_lengths[next_tokens_source]
SCREAMING_SNAKE_CASE : int = next_tokens % scores_sum.shape[1]
SCREAMING_SNAKE_CASE : Dict = next_tokens.unsqueeze(1 )
SCREAMING_SNAKE_CASE : Dict = tokens[next_tokens_source]
SCREAMING_SNAKE_CASE : Any = torch.cat((tokens, next_tokens) , dim=1 )
SCREAMING_SNAKE_CASE : List[str] = generated[next_tokens_source]
SCREAMING_SNAKE_CASE : Optional[Any] = scores_sum_average * seq_lengths
SCREAMING_SNAKE_CASE : Any = is_stopped[next_tokens_source]
SCREAMING_SNAKE_CASE : Dict = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
SCREAMING_SNAKE_CASE : str = torch.cat((generated, next_token_embed) , dim=1 )
SCREAMING_SNAKE_CASE : Dict = is_stopped + next_tokens.eq(lowerCamelCase_ ).squeeze()
if is_stopped.all():
break
SCREAMING_SNAKE_CASE : int = scores / seq_lengths
SCREAMING_SNAKE_CASE : Dict = scores.argsort(descending=lowerCamelCase_ )
# tokens tensors are already padded to max_seq_length
SCREAMING_SNAKE_CASE : Union[str, Any] = [tokens[i] for i in order]
SCREAMING_SNAKE_CASE : Dict = torch.stack(lowerCamelCase_ , dim=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
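A shape-only sketch of the prefix path: clip-style features are projected to GPT-2 embedding width and prepended to the text embeddings before the transformer runs. The dimensions are hypothetical and the single linear layer stands in for the encode_prefix/decode_prefix pair:

import torch
from torch import nn

batch, prefix_len, inner_dim, n_embd = 2, 10, 512, 768
project = nn.Linear(inner_dim, n_embd)
prefix = project(torch.randn(batch, inner_dim)).unsqueeze(1).expand(-1, prefix_len, -1)
text = torch.randn(batch, 5, n_embd)
embeds = torch.cat((prefix, text), dim=1)  # what the GPT-2 body consumes
assert embeds.shape == (2, prefix_len + 5, n_embd)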
| 323 | 1 |
'''simple docstring'''
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return number | (1 << position)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return number & ~(1 << position)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return number ^ (1 << position)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return ((number >> position) & 1) == 1
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''git_vision_model'''
def __init__( self : int , lowerCamelCase_ : Dict=7_68 , lowerCamelCase_ : Tuple=30_72 , lowerCamelCase_ : List[Any]=12 , lowerCamelCase_ : Tuple=12 , lowerCamelCase_ : int=3 , lowerCamelCase_ : List[str]=2_24 , lowerCamelCase_ : Optional[Any]=16 , lowerCamelCase_ : Optional[Any]="quick_gelu" , lowerCamelCase_ : List[Any]=1e-5 , lowerCamelCase_ : List[Any]=0.0 , lowerCamelCase_ : Optional[Any]=0.02 , **lowerCamelCase_ : str , ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : int = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE : Optional[int] = image_size
SCREAMING_SNAKE_CASE : List[str] = initializer_range
SCREAMING_SNAKE_CASE : str = attention_dropout
SCREAMING_SNAKE_CASE : Any = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = hidden_act
@classmethod
def lowerCamelCase_ ( cls : List[str] , lowerCamelCase_ : Union[str, os.PathLike] , **lowerCamelCase_ : int ):
'''simple docstring'''
cls._set_token_in_kwargs(lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = cls.get_config_dict(lowerCamelCase_ , **lowerCamelCase_ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
SCREAMING_SNAKE_CASE : Optional[Any] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowerCamelCase_ , **lowerCamelCase_ )
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''git'''
    def __init__( self , vision_config=None , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=6 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=10_24 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , tie_word_embeddings=False , bos_token_id=1_01 , eos_token_id=1_02 , num_image_with_embedding=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , pad_token_id=pad_token_id , **kwargs )

        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )

        self.vision_config = GitVisionConfig(**vision_config )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
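# A hedged usage sketch for the two config classes above, mirroring the
# upstream Hugging Face Transformers API (run where `transformers` is
# installed; the constructor values are illustrative):
#
#     from transformers import GitConfig, GitVisionConfig
#
#     vision_cfg = GitVisionConfig(image_size=224, patch_size=16)
#     git_cfg = GitConfig(vision_config=vision_cfg.to_dict(), num_hidden_layers=6)
#     print(git_cfg.vision_config.image_size)  # 224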
| 323 | 1 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
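# The deprecation message above already names the supported path; a one-line
# hedged sketch of the replacement import (assumes a recent diffusers install):
#
#     from diffusers import StableDiffusionControlNetPipeline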
| 323 |
'''simple docstring'''
from manim import *
class UpperCamelCase__ (Scene ):
    """simple docstring"""

    def construct( self ):
        '''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE : List[str] = Rectangle(height=0.25 , width=0.25 )
SCREAMING_SNAKE_CASE : Optional[int] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : List[Any] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : str = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Tuple = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : List[Any] = Text("""CPU""" , font_size=24 )
SCREAMING_SNAKE_CASE : Any = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = Text("""GPU""" , font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : List[Any] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Text("""Model""" , font_size=24 )
SCREAMING_SNAKE_CASE : List[str] = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for i, rect in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = fill.copy().set_fill(lowerCamelCase_ , opacity=0.8 )
target.move_to(lowerCamelCase_ )
model_arr.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(lowerCamelCase_ )
self.add(*lowerCamelCase_ , *lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Tuple = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Tuple = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Optional[int] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Dict = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : List[Any] = Text("""Disk""" , font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
disk.move_to([-4, -1.25, 0] )
self.add(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE : Optional[Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(lowerCamelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = MarkupText(
f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = Square(0.3 )
input.set_fill(lowerCamelCase_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , lowerCamelCase_ , buff=0.5 )
self.play(Write(lowerCamelCase_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=lowerCamelCase_ , buff=0.02 )
self.play(MoveToTarget(lowerCamelCase_ ) )
self.play(FadeOut(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : int = Arrow(start=lowerCamelCase_ , end=lowerCamelCase_ , color=lowerCamelCase_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , lowerCamelCase_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
SCREAMING_SNAKE_CASE : Optional[int] = MarkupText(
f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=3 ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02}
self.play(
Write(lowerCamelCase_ ) , Circumscribe(model_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_cpu_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
SCREAMING_SNAKE_CASE : Optional[int] = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , lowerCamelCase_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
SCREAMING_SNAKE_CASE : Any = AnimationGroup(
FadeOut(lowerCamelCase_ , run_time=0.5 ) , MoveToTarget(lowerCamelCase_ , run_time=0.5 ) , FadeIn(lowerCamelCase_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(lowerCamelCase_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
SCREAMING_SNAKE_CASE : Optional[Any] = 0.7
self.play(
Circumscribe(model_arr[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_arr[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = a_c
SCREAMING_SNAKE_CASE : Optional[Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(lowerCamelCase_ ) , FadeOut(lowerCamelCase_ , run_time=0.5 ) , )
SCREAMING_SNAKE_CASE : int = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=3 ) , MoveToTarget(lowerCamelCase_ ) )
self.wait()
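# Rendering note (hedged): manim scenes are driven from the CLI. Assuming this
# module is saved as big_model_inference.py, a fast low-quality preview is:
#
#     manim -pql big_model_inference.py UpperCamelCase__
#
# where -p previews the result and -ql selects the low-quality preset.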
| 323 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 323 |
'''simple docstring'''
from __future__ import annotations
graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    """simple docstring"""

    def __init__( self , graph : dict[str, list[str]] , source_vertex : str ):
        '''simple docstring'''
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search( self ):
        '''simple docstring'''
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex )
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex )

    def shortest_path( self , target_vertex : str ):
        '''simple docstring'''
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex )
        if target_vertex_parent is None:
            msg = (
                f'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
            )
            raise ValueError(msg )

        return self.shortest_path(target_vertex_parent ) + f'''->{target_vertex}'''
if __name__ == "__main__":
    g = Graph(graph, """G""")
    g.breadth_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 323 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=2_24 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        '''simple docstring'''
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""

    image_processing_class = ViTImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
@property
    def image_processor_dict( self ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , """image_mean""" ) )
        self.assertTrue(hasattr(image_processor , """image_std""" ) )
        self.assertTrue(hasattr(image_processor , """do_normalize""" ) )
        self.assertTrue(hasattr(image_processor , """do_resize""" ) )
        self.assertTrue(hasattr(image_processor , """size""" ) )
    def test_batch_feature( self ):
        '''simple docstring'''
        pass
    def test_call_pil( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )

        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ) , )

        # Test batched
        encoded_images = image_processor(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ) , )
    def test_call_numpy( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )

        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ) , )

        # Test batched
        encoded_images = image_processor(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ) , )
    def test_call_pytorch( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )

        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ) , )

        # Test batched
        encoded_images = image_processor(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ) , )
| 323 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    """simple docstring"""

    def __init__( self , pos_x : int , pos_y : int , goal_x : int , goal_y : int , g_cost : int , parent : Node | None , ):
        '''simple docstring'''
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic( self ):
        '''simple docstring'''
        dy = self.pos_x - self.goal_x
        dx = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx ) + abs(dy )
        else:
            return sqrt(dy**2 + dx**2 )

    def __lt__( self , other : Node ):
        '''simple docstring'''
        return self.f_cost < other.f_cost
class AStar:
    """simple docstring"""

    def __init__( self , start : TPosition , goal : TPosition ):
        '''simple docstring'''
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , None )

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search( self ):
        '''simple docstring'''
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node )

            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )

        return [self.start.pos]

    def get_successors( self , parent : Node ):
        '''simple docstring'''
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
        return successors

    def retrace_path( self , node : Node | None ):
        '''simple docstring'''
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """simple docstring"""

    def __init__( self , start : TPosition , goal : TPosition ):
        '''simple docstring'''
        self.fwd_astar = AStar(start , goal )
        self.bwd_astar = AStar(goal , start )
        self.reached = False

    def search( self ):
        '''simple docstring'''
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0 )
            current_bwd_node = self.bwd_astar.open_nodes.pop(0 )

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )

            self.fwd_astar.closed_nodes.append(current_fwd_node )
            self.bwd_astar.closed_nodes.append(current_bwd_node )

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node ),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node ),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node )
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node ) )

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node )
                        else:
                            astar.open_nodes.append(better_node )

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path( self , fwd_node : Node , bwd_node : Node ):
        '''simple docstring'''
        fwd_path = self.fwd_astar.retrace_path(fwd_node )
        bwd_path = self.bwd_astar.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__UpperCAmelCase = (0, 0)
__UpperCAmelCase = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__UpperCAmelCase = time.time()
__UpperCAmelCase = AStar(init, goal)
__UpperCAmelCase = a_star.search()
__UpperCAmelCase = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
__UpperCAmelCase = time.time()
__UpperCAmelCase = BidirectionalAStar(init, goal)
__UpperCAmelCase = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
| 323 | 1 |
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
__UpperCAmelCase = """\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
"""
__UpperCAmelCase = """\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""
__UpperCAmelCase = """
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
'scores': List of scores.
Examples:
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> bleurt = datasets.load_metric(\"bleurt\")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results[\"scores\"]])
[1.03, 1.04]
"""
CHECKPOINT_URLS = {
"""bleurt-tiny-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip""",
"""bleurt-tiny-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip""",
"""bleurt-base-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip""",
"""bleurt-base-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip""",
"""bleurt-large-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip""",
"""bleurt-large-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip""",
"""BLEURT-20-D3""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip""",
"""BLEURT-20-D6""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip""",
"""BLEURT-20-D12""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip""",
"""BLEURT-20""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip""",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric ):
    """simple docstring"""
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , )
    def _download_and_prepare( self , dl_manager ):
        '''simple docstring'''
        # check that the config name specifies a valid BLEURT checkpoint
        if self.config_name == "default":
            logger.warning(
                """Using default BLEURT-Base checkpoint for sequence maximum length 128. """
                """You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" )
            self.config_name = """bleurt-base-128"""

        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()

        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()

        else:
            raise KeyError(
                f'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        self.scorer = score.BleurtScorer(os.path.join(model_path , checkpoint_name ) )
    def _compute( self , predictions , references ):
        '''simple docstring'''
        scores = self.scorer.score(references=references , candidates=predictions )
        return {"scores": scores}
| 323 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class EfficientNetConfig(PretrainedConfig ):
    """simple docstring"""

    model_type = '''efficientnet'''
    def __init__( self , num_channels : int = 3 , image_size : int = 6_00 , width_coefficient : float = 2.0 , depth_coefficient : float = 3.1 , depth_divisor : int = 8 , kernel_sizes : List[int] = [3, 3, 5, 3, 5, 5, 3] , in_channels : List[int] = [32, 16, 24, 40, 80, 1_12, 1_92] , out_channels : List[int] = [16, 24, 40, 80, 1_12, 1_92, 3_20] , depthwise_padding : List[int] = [] , strides : List[int] = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats : List[int] = [1, 2, 2, 3, 3, 4, 1] , expand_ratios : List[int] = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio : float = 0.25 , hidden_act : str = "swish" , hidden_dim : int = 25_60 , pooling_type : str = "mean" , initializer_range : float = 0.02 , batch_norm_eps : float = 0.001 , batch_norm_momentum : float = 0.99 , dropout_rate : float = 0.5 , drop_connect_rate : float = 0.2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
class EfficientNetOnnxConfig(OnnxConfig ):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
    def inputs( self ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
    def atol_for_validation( self ):
'''simple docstring'''
return 1e-5
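# Hedged usage sketch: wiring the config above into a model via the upstream
# transformers API (the EfficientNetModel class name is assumed from upstream,
# not defined in this file):
#
#     from transformers import EfficientNetConfig, EfficientNetModel
#
#     config = EfficientNetConfig(width_coefficient=1.0, depth_coefficient=1.0)
#     model = EfficientNetModel(config)
#     print(config.num_hidden_layers)  # sum(num_block_repeats) * 4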
| 323 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
__UpperCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model"""}
__UpperCAmelCase = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""moussaKam/mbarthez""": 1024,
"""moussaKam/barthez""": 1024,
"""moussaKam/barthez-orangesum-title""": 1024,
}
__UpperCAmelCase = """▁"""
class BarthezTokenizer(PreTrainedTokenizer ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file : str , bos_token : str="<s>" , eos_token : str="</s>" , sep_token : str="</s>" , cls_token : str="<s>" , unk_token : str="<unk>" , pad_token : str="<pad>" , mask_token : str="<mask>" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ):
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )

        self.fairseq_tokens_to_ids = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}

        self.fairseq_tokens_to_ids["""<mask>"""] = len(self.sp_model ) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
@property
    def vocab_size( self ):
'''simple docstring'''
return len(self.sp_model )
    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text : str ):
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )

        return spm_id if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )

        return (out_vocab_file,)
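# Hedged usage sketch for the tokenizer above, via the upstream transformers
# class and one of the checkpoints from PRETRAINED_VOCAB_FILES_MAP (network
# access and the sentencepiece dependency assumed):
#
#     from transformers import BarthezTokenizer
#
#     tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#     ids = tokenizer("Le camembert est délicieux !").input_ids
#     print(tokenizer.decode(ids))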
| 323 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ImageToTextPipeline(Pipeline ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def _sanitize_parameters( self , max_new_tokens=None , generate_kwargs=None , prompt=None ):
        '''simple docstring'''
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["""prompt"""] = prompt
        if generate_kwargs is not None:
            forward_kwargs["""generate_kwargs"""] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["""generate_kwargs"""] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
                    """ please use only one""" )
            forward_kwargs["""generate_kwargs"""]["""max_new_tokens"""] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__( self , images : Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
        '''simple docstring'''
        return super().__call__(images , **kwargs )
    def preprocess( self , image , prompt=None ):
        '''simple docstring'''
        image = load_image(image )

        if prompt is not None:
            if not isinstance(prompt , str ):
                raise ValueError(
                    f'''Received an invalid text input, got - {type(prompt )} - but expected a single string. '''
                    """Note also that one single text can be provided for conditional image to text generation.""" )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                input_ids = self.tokenizer(text=prompt , add_special_tokens=False ).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids ).unsqueeze(0 )
                model_inputs.update({"""input_ids""": input_ids} )

            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image , header_text=prompt , return_tensors=self.framework )

            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                text_inputs = self.tokenizer(prompt , return_tensors=self.framework )
                model_inputs.update(text_inputs )

            else:
                raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )

        else:
            model_inputs = self.image_processor(images=image , return_tensors=self.framework )

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["""input_ids"""] = None

        return model_inputs
    def _forward( self , model_inputs , generate_kwargs=None ):
        '''simple docstring'''
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
        # pipeline will group them into a list of `None`, which fails `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["""input_ids"""] , list )
            and all(x is None for x in model_inputs["""input_ids"""] )
        ):
            model_inputs["""input_ids"""] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name )
        model_outputs = self.model.generate(inputs , **model_inputs , **generate_kwargs )
        return model_outputs
    def postprocess( self , model_outputs ):
        '''simple docstring'''
        records = []
        for output_ids in model_outputs:
            record = {
                """generated_text""": self.tokenizer.decode(
                    output_ids , skip_special_tokens=True , )
            }
            records.append(record )
        return records
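# Hedged usage sketch: this class backs the "image-to-text" pipeline task in
# transformers. The checkpoint mirrors the GIT config seen earlier in this
# corpus; any image-captioning checkpoint works. The image URL is the standard
# COCO example used in the transformers docs:
#
#     from transformers import pipeline
#
#     captioner = pipeline("image-to-text", model="microsoft/git-base")
#     print(captioner("http://images.cocodataset.org/val2017/000000039769.jpg"))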
| 323 | 1 |
'''simple docstring'''
import operator as op
__UpperCAmelCase = """scaler.pt"""
__UpperCAmelCase = """pytorch_model"""
__UpperCAmelCase = """random_states"""
__UpperCAmelCase = """optimizer"""
__UpperCAmelCase = """scheduler"""
__UpperCAmelCase = """pytorch_model.bin"""
__UpperCAmelCase = """pytorch_model.bin.index.json"""
__UpperCAmelCase = """model.safetensors"""
__UpperCAmelCase = """model.safetensors.index.json"""
__UpperCAmelCase = """1.10.2"""
__UpperCAmelCase = """py38"""
__UpperCAmelCase = """4.17.0"""
__UpperCAmelCase = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
__UpperCAmelCase = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
__UpperCAmelCase = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
__UpperCAmelCase = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
__UpperCAmelCase = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
__UpperCAmelCase = """2.0.1"""
__UpperCAmelCase = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
__UpperCAmelCase = ["""default""", """reduce-overhead""", """max-autotune"""]
__UpperCAmelCase = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
__UpperCAmelCase = [
"""nnodes""",
"""nproc_per_node""",
"""rdzv_backend""",
"""rdzv_endpoint""",
"""rdzv_id""",
"""rdzv_conf""",
"""standalone""",
"""max_restarts""",
"""monitor_interval""",
"""start_method""",
"""role""",
"""module""",
"""m""",
"""no_python""",
"""run_path""",
"""log_dir""",
"""r""",
"""redirects""",
"""t""",
"""tee""",
"""node_rank""",
"""master_addr""",
"""master_port""",
]
__UpperCAmelCase = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
__UpperCAmelCase = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
| 323 |
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest ):
    """simple docstring"""

    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ):
        '''simple docstring'''
        config = {
            """num_train_timesteps""": 2_01,
            """sigma_min""": 0.002,
            """sigma_max""": 80.0,
        }

        config.update(**kwargs )
        return config
    def test_step_shape( self ):
        '''simple docstring'''
        num_inference_steps = 10

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config )

        scheduler.set_timesteps(num_inference_steps )

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual , timestep_0 , sample ).prev_sample
        output_1 = scheduler.step(residual , timestep_1 , sample ).prev_sample

        self.assertEqual(output_0.shape , sample.shape )
        self.assertEqual(output_0.shape , output_1.shape )
    def test_timesteps( self ):
        '''simple docstring'''
        for timesteps in [10, 50, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_clip_denoised( self ):
        '''simple docstring'''
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised )
    def test_full_loop_no_noise_onestep( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps )
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0 )

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps ):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample , t )

            # 2. predict noise residual
            residual = model(scaled_sample , t )

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 192.7_614 ) < 1e-2
        assert abs(result_mean.item() - 0.2_510 ) < 1e-3
    def test_full_loop_no_noise_multistep( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        timesteps = [1_06, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0 )

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample , t )

            # 2. predict noise residual
            residual = model(scaled_sample , t )

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 347.6_357 ) < 1e-2
        assert abs(result_mean.item() - 0.4_527 ) < 1e-3
    def test_custom_timesteps_increasing_order( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError , msg="""`timesteps` must be in descending order.""" ):
            scheduler.set_timesteps(timesteps=timesteps )
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps )

        with self.assertRaises(ValueError , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
    def test_custom_timesteps_too_large( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
            scheduler.set_timesteps(timesteps=timesteps )
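# Hedged sketch of driving the scheduler outside the test harness, using only
# calls exercised above (the zero "residual" stands in for a real UNet output):
#
#     import torch
#     from diffusers import CMStochasticIterativeScheduler
#
#     sched = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
#     sched.set_timesteps(10)
#     sample = torch.randn(1, 3, 32, 32) * sched.init_noise_sigma
#     for t in sched.timesteps:
#         model_input = sched.scale_model_input(sample, t)
#         residual = torch.zeros_like(model_input)  # placeholder model output
#         sample = sched.step(residual, t, sample).prev_sample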
| 323 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy( preds , labels ):
"""simple docstring"""
return (preds == labels).mean()
@dataclass
class ModelArguments:
    """simple docstring"""

    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class DataTrainingArguments:
    """simple docstring"""

    task_name: str = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
    data_dir: str = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
    max_seq_length: int = field(
        default=128 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def main():
"""simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase_ )
# Set seed
set_seed(training_args.seed )
try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
def compute_metrics(lowerCamelCase_ ) -> Dict:
SCREAMING_SNAKE_CASE : str = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(lowerCamelCase_ , p.label_ids )}
# Data collator
SCREAMING_SNAKE_CASE : List[Any] = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
SCREAMING_SNAKE_CASE : Any = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , compute_metrics=lowerCamelCase_ , data_collator=lowerCamelCase_ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE : Optional[Any] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
SCREAMING_SNAKE_CASE : Optional[Any] = trainer.evaluate()
SCREAMING_SNAKE_CASE : str = os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_master():
with open(lowerCamelCase_ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , lowerCamelCase_ , lowerCamelCase_ )
writer.write("""%s = %s\n""" % (key, value) )
results.update(lowerCamelCase_ )
return results
def __A ( lowerCamelCase_ ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
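# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original script): the trainer above is
# normally launched from the command line; script name, task, and paths below
# are hypothetical placeholders.
#
#   python run_multiple_choice.py \
#       --task_name swag \
#       --model_name_or_path bert-base-uncased \
#       --data_dir ./data/swag \
#       --output_dir ./out \
#       --max_seq_length 128 \
#       --do_train --do_eval
# ---------------------------------------------------------------------------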
| 323 |
'''simple docstring'''
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : NestedDataStructureLike[PathLike] , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
lowerCamelCase_ , split=lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_ , streaming=lowerCamelCase_ , num_proc=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : int = path_or_paths if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else {self.split: path_or_paths}
SCREAMING_SNAKE_CASE : Optional[int] = Text(
cache_dir=lowerCamelCase_ , data_files=lowerCamelCase_ , features=lowerCamelCase_ , **lowerCamelCase_ , )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.streaming:
SCREAMING_SNAKE_CASE : int = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[str] = None
self.builder.download_and_prepare(
download_config=lowerCamelCase_ , download_mode=lowerCamelCase_ , verification_mode=lowerCamelCase_ , base_path=lowerCamelCase_ , num_proc=self.num_proc , )
SCREAMING_SNAKE_CASE : int = self.builder.as_dataset(
split=self.split , verification_mode=lowerCamelCase_ , in_memory=self.keep_in_memory )
return dataset
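# Hedged usage sketch (assumes the reader above is exported as
# datasets.io.text.TextDatasetReader; the file path is a placeholder):
#
#   from datasets.io.text import TextDatasetReader
#   ds = TextDatasetReader("corpus.txt", keep_in_memory=True).read()
#   print(ds[0])  # -> {"text": "<first line of corpus.txt>"}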
| 323 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {
"""configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
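# Hedged note (not in the original module): with the _LazyModule pattern above,
# importing the package stays cheap and heavy backends load on first use, e.g.:
#
#   import transformers.models.bloom as bloom  # no torch import yet
#   cfg = bloom.BloomConfig()                   # resolves the submodule lazily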
| 323 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = 3_84
SCREAMING_SNAKE_CASE : Union[str, Any] = 7
if "tiny" in model_name:
SCREAMING_SNAKE_CASE : List[str] = 96
SCREAMING_SNAKE_CASE : List[str] = (2, 2, 6, 2)
SCREAMING_SNAKE_CASE : List[Any] = (3, 6, 12, 24)
elif "small" in model_name:
SCREAMING_SNAKE_CASE : Any = 96
SCREAMING_SNAKE_CASE : List[str] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : int = (3, 6, 12, 24)
elif "base" in model_name:
SCREAMING_SNAKE_CASE : int = 1_28
SCREAMING_SNAKE_CASE : Any = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : int = (4, 8, 16, 32)
SCREAMING_SNAKE_CASE : Optional[Any] = 12
SCREAMING_SNAKE_CASE : str = 5_12
elif "large" in model_name:
SCREAMING_SNAKE_CASE : Tuple = 1_92
SCREAMING_SNAKE_CASE : Tuple = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : List[str] = (6, 12, 24, 48)
SCREAMING_SNAKE_CASE : Tuple = 12
SCREAMING_SNAKE_CASE : Union[str, Any] = 7_68
# set label information
SCREAMING_SNAKE_CASE : List[str] = 1_50
SCREAMING_SNAKE_CASE : Optional[Any] = """huggingface/label-files"""
SCREAMING_SNAKE_CASE : List[str] = """ade20k-id2label.json"""
SCREAMING_SNAKE_CASE : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE : str = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Optional[Any] = SwinConfig(
embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , num_heads=lowerCamelCase_ , window_size=lowerCamelCase_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
SCREAMING_SNAKE_CASE : List[str] = UperNetConfig(
backbone_config=lowerCamelCase_ , auxiliary_in_channels=lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , )
return config
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = dct.pop(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = val
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
SCREAMING_SNAKE_CASE : Dict = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE : int = in_proj_weight[:dim, :]
SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[: dim]
SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE : Any = in_proj_bias[
dim : dim * 2
]
SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[
-dim :, :
]
SCREAMING_SNAKE_CASE : str = in_proj_bias[-dim :]
# fmt: on
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = x.shape
SCREAMING_SNAKE_CASE : Any = x.reshape(lowerCamelCase_ , 4 , in_channel // 4 )
SCREAMING_SNAKE_CASE : Any = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = x.shape
SCREAMING_SNAKE_CASE : Dict = x.reshape(lowerCamelCase_ , in_channel // 4 , 4 )
SCREAMING_SNAKE_CASE : str = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = x.shape[0]
SCREAMING_SNAKE_CASE : List[str] = x.reshape(4 , in_channel // 4 )
SCREAMING_SNAKE_CASE : str = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = x.shape[0]
SCREAMING_SNAKE_CASE : Optional[int] = x.reshape(in_channel // 4 , 4 )
SCREAMING_SNAKE_CASE : str = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowerCamelCase_ )
return x
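def _unfold_roundtrip_sanity_check():
    # Hedged sanity check (not in the upstream conversion script): each
    # "reverse_correct_unfold_*" helper above undoes its "correct_unfold_*"
    # counterpart, so a roundtrip must return the input unchanged. Function
    # names follow the upstream script; the dumped definitions are obfuscated.
    x = torch.arange(8.0)
    y = reverse_correct_unfold_norm_order(correct_unfold_norm_order(x))
    assert torch.equal(x, y)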
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
SCREAMING_SNAKE_CASE : List[str] = model_name_to_url[model_name]
SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="""cpu""" , file_name=lowerCamelCase_ )[
"""state_dict"""
]
for name, param in state_dict.items():
print(lowerCamelCase_ , param.shape )
SCREAMING_SNAKE_CASE : Dict = get_upernet_config(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetForSemanticSegmentation(lowerCamelCase_ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(lowerCamelCase_ )
if "bn" in key:
SCREAMING_SNAKE_CASE : List[str] = key.replace("""bn""" , """batch_norm""" )
SCREAMING_SNAKE_CASE : Optional[Any] = val
# rename keys
SCREAMING_SNAKE_CASE : Union[str, Any] = create_rename_keys(lowerCamelCase_ )
for src, dest in rename_keys:
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
read_in_q_k_v(lowerCamelCase_ , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
SCREAMING_SNAKE_CASE : Tuple = reverse_correct_unfold_reduction_order(lowerCamelCase_ )
if "norm" in key:
SCREAMING_SNAKE_CASE : Optional[int] = reverse_correct_unfold_norm_order(lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
# verify on image
SCREAMING_SNAKE_CASE : Optional[int] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("""RGB""" )
SCREAMING_SNAKE_CASE : Optional[int] = SegformerImageProcessor()
SCREAMING_SNAKE_CASE : str = processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = outputs.logits
print(logits.shape )
print("""First values of logits:""" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] )
elif model_name == "upernet-swin-small":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] )
elif model_name == "upernet-swin-base":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] )
elif model_name == "upernet-swin-large":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase_ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowerCamelCase_ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCAmelCase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
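# Hedged example invocation (script name and output path are placeholders; the
# model name matches the argparse default above):
#   python convert_upernet_checkpoint.py --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny --push_to_hub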
| 323 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''timm_backbone'''
def __init__( self : Tuple , lowerCamelCase_ : int=None , lowerCamelCase_ : int=3 , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : int=True , lowerCamelCase_ : Optional[Any]=None , **lowerCamelCase_ : int , ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = backbone
SCREAMING_SNAKE_CASE : int = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = features_only
SCREAMING_SNAKE_CASE : Any = use_pretrained_backbone
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : Optional[int] = out_indices if out_indices is not None else (-1,)
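# Hedged usage sketch (assumes the config above is transformers'
# TimmBackboneConfig; the backbone name is illustrative):
#
#   config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
#   config.use_timm_backbone  # -> True, hard-coded in __init__ above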
| 323 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class UpperCamelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Dict[str, int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int = None , lowerCamelCase_ : int = None ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Any = pad_token_id
SCREAMING_SNAKE_CASE : List[Any] = max_length
SCREAMING_SNAKE_CASE : Optional[int] = vocab
SCREAMING_SNAKE_CASE : List[Any] = merges
SCREAMING_SNAKE_CASE : Tuple = BytePairTokenizer(lowerCamelCase_ , lowerCamelCase_ , sequence_length=lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : Any , lowerCamelCase_ : GPTaTokenizer , *lowerCamelCase_ : str , **lowerCamelCase_ : str ):
'''simple docstring'''
        SCREAMING_SNAKE_CASE : Tuple = [""" """.join(m ) for m in tokenizer.bpe_ranks.keys()]
SCREAMING_SNAKE_CASE : List[str] = tokenizer.get_vocab()
return cls(lowerCamelCase_ , lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : List[Any] , lowerCamelCase_ : Union[str, os.PathLike] , *lowerCamelCase_ : str , **lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = GPTaTokenizer.from_pretrained(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
return cls.from_tokenizer(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : List[str] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
return cls(**lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : int = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tf_tokenizer(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = tf.ones_like(lowerCamelCase_ )
if self.pad_token_id is not None:
# pad the tokens up to max length
SCREAMING_SNAKE_CASE : Optional[int] = max_length if max_length is not None else self.max_length
if max_length is not None:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = pad_model_inputs(
lowerCamelCase_ , max_seq_length=lowerCamelCase_ , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 323 | 1 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def __A ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = 10
SCREAMING_SNAKE_CASE : Any = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
SCREAMING_SNAKE_CASE : Any = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [97], """text""": ["""1976"""]}] * 10,
"""id""": list(range(lowerCamelCase_ ) ),
} , features=lowerCamelCase_ , )
return dataset
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=lowerCamelCase_ )
return filename
# FILE_CONTENT + files
__UpperCAmelCase = """\
Text data.
Second line of data."""
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
SCREAMING_SNAKE_CASE : Union[str, Any] = FILE_CONTENT
with open(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ )
return filename
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
import bza
SCREAMING_SNAKE_CASE : List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
SCREAMING_SNAKE_CASE : Union[str, Any] = bytes(lowerCamelCase_ , """utf-8""" )
with bza.open(lowerCamelCase_ , """wb""" ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
import gzip
SCREAMING_SNAKE_CASE : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
SCREAMING_SNAKE_CASE : Tuple = bytes(lowerCamelCase_ , """utf-8""" )
with gzip.open(lowerCamelCase_ , """wb""" ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
import lza.frame
SCREAMING_SNAKE_CASE : str = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
SCREAMING_SNAKE_CASE : Dict = bytes(lowerCamelCase_ , """utf-8""" )
with lza.frame.open(lowerCamelCase_ , """wb""" ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
SCREAMING_SNAKE_CASE : List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
with pyazr.SevenZipFile(lowerCamelCase_ , """w""" ) as archive:
archive.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
import tarfile
SCREAMING_SNAKE_CASE : int = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(lowerCamelCase_ , """w""" ) as f:
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
import lzma
SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
SCREAMING_SNAKE_CASE : int = bytes(lowerCamelCase_ , """utf-8""" )
with lzma.open(lowerCamelCase_ , """wb""" ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
import zipfile
SCREAMING_SNAKE_CASE : Tuple = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
SCREAMING_SNAKE_CASE : int = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
SCREAMING_SNAKE_CASE : Dict = bytes(lowerCamelCase_ , """utf-8""" )
with zstd.open(lowerCamelCase_ , """wb""" ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
SCREAMING_SNAKE_CASE : Optional[int] = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ )
return filename
__UpperCAmelCase = [
{"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0},
]
__UpperCAmelCase = [
{"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0},
{"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0},
]
__UpperCAmelCase = {
"""col_1""": ["""0""", """1""", """2""", """3"""],
"""col_2""": [0, 1, 2, 3],
"""col_3""": [0.0, 1.0, 2.0, 3.0],
}
__UpperCAmelCase = [
{"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0},
{"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1},
]
__UpperCAmelCase = [
{"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0},
]
@pytest.fixture(scope="""session""" )
def __A ( ):
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = datasets.Dataset.from_dict(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
with contextlib.closing(sqlitea.connect(lowerCamelCase_ ) ) as con:
SCREAMING_SNAKE_CASE : List[Any] = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(lowerCamelCase_ , """w""" , newline="""""" ) as f:
SCREAMING_SNAKE_CASE : Tuple = csv.DictWriter(lowerCamelCase_ , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(lowerCamelCase_ , """w""" , newline="""""" ) as f:
SCREAMING_SNAKE_CASE : int = csv.DictWriter(lowerCamelCase_ , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
import bza
SCREAMING_SNAKE_CASE : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(lowerCamelCase_ , """rb""" ) as f:
SCREAMING_SNAKE_CASE : Optional[Any] = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowerCamelCase_ , """wb""" ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
SCREAMING_SNAKE_CASE : List[Any] = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(lowerCamelCase_ , """wb""" ) as f:
SCREAMING_SNAKE_CASE : Optional[Any] = pq.ParquetWriter(lowerCamelCase_ , schema=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowerCamelCase_ ) )] for k in DATA[0]} , schema=lowerCamelCase_ )
writer.write_table(lowerCamelCase_ )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
SCREAMING_SNAKE_CASE : Optional[Any] = {"""data""": DATA}
with open(lowerCamelCase_ , """w""" ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
SCREAMING_SNAKE_CASE : Optional[Any] = {"""data""": DATA_DICT_OF_LISTS}
with open(lowerCamelCase_ , """w""" ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(lowerCamelCase_ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase_ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(lowerCamelCase_ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase_ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(lowerCamelCase_ , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(lowerCamelCase_ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(lowerCamelCase_ , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowerCamelCase_ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
import gzip
SCREAMING_SNAKE_CASE : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(lowerCamelCase_ , """rb""" ) as orig_file:
with gzip.open(lowerCamelCase_ , """wb""" ) as zipped_file:
zipped_file.writelines(lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
import gzip
SCREAMING_SNAKE_CASE : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(lowerCamelCase_ , """rb""" ) as orig_file:
with gzip.open(lowerCamelCase_ , """wb""" ) as zipped_file:
zipped_file.writelines(lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join("""nested""" , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(lowerCamelCase_ , """w""" ) as f:
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(lowerCamelCase_ , """w""" ) as f:
f.add(lowerCamelCase_ , arcname=os.path.join("""nested""" , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = ["""0""", """1""", """2""", """3"""]
SCREAMING_SNAKE_CASE : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(lowerCamelCase_ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = ["""0""", """1""", """2""", """3"""]
SCREAMING_SNAKE_CASE : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(lowerCamelCase_ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = ["""0""", """1""", """2""", """3"""]
SCREAMING_SNAKE_CASE : List[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(lowerCamelCase_ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(lowerCamelCase_ , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
SCREAMING_SNAKE_CASE : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope="""session""" )
def __A ( ):
"""simple docstring"""
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def __A ( ):
"""simple docstring"""
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
return data_dir
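# Hedged usage sketch (not part of the fixtures above): pytest injects these
# fixtures by function name; assuming the CSV fixture is named `csv_path` as
# in the upstream conftest, a consuming test looks like:
#
#   def test_csv_fixture(csv_path):
#       import csv as _csv
#       with open(csv_path, newline="") as f:
#           rows = list(_csv.DictReader(f))
#       assert rows[0]["col_1"] == "0"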
| 323 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase_ : Optional[NestedDataStructureLike[PathLike]] = None , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Optional[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = path_or_paths
SCREAMING_SNAKE_CASE : List[Any] = split if split or isinstance(lowerCamelCase_ , lowerCamelCase_ ) else """train"""
SCREAMING_SNAKE_CASE : List[Any] = features
SCREAMING_SNAKE_CASE : Union[str, Any] = cache_dir
SCREAMING_SNAKE_CASE : Dict = keep_in_memory
SCREAMING_SNAKE_CASE : Union[str, Any] = streaming
SCREAMING_SNAKE_CASE : Optional[int] = num_proc
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs
@abstractmethod
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
pass
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : str , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Tuple , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = features
SCREAMING_SNAKE_CASE : int = cache_dir
SCREAMING_SNAKE_CASE : Dict = keep_in_memory
SCREAMING_SNAKE_CASE : Tuple = streaming
SCREAMING_SNAKE_CASE : Union[str, Any] = num_proc
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs
@abstractmethod
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
pass
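# Hedged sketch (not in the original module): a minimal concrete reader only
# needs to implement the abstract `read` method declared above, e.g.:
#
#   class InMemoryReader(AbstractDatasetReader):
#       def read(self) -> Dataset:
#           # treat path_or_paths as an already-built dict of columns
#           return Dataset.from_dict(self.path_or_paths)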
| 323 | 1 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def __A ( lowerCamelCase_ ):
"""simple docstring"""
return np.maximum(0 , lowerCamelCase_ )
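def relu_derivative(vector):
    # Hedged companion sketch (not in the original file): elementwise
    # subgradient of ReLU, using the common convention relu'(0) = 0.
    return np.where(np.array(vector) > 0, 1.0, 0.0)
    # e.g. relu_derivative([-1, 0, 5]) --> [0. 0. 1.]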
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 323 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = """ylacombe/bark-small"""
SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : str = """en_speaker_1"""
SCREAMING_SNAKE_CASE : Optional[int] = """This is a test string"""
SCREAMING_SNAKE_CASE : Optional[int] = """speaker_embeddings_path.json"""
SCREAMING_SNAKE_CASE : List[Any] = """speaker_embeddings"""
def lowerCamelCase_ ( self : int , **lowerCamelCase_ : int ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[str] = BarkProcessor(tokenizer=lowerCamelCase_ )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
SCREAMING_SNAKE_CASE : int = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
SCREAMING_SNAKE_CASE : List[str] = 35
SCREAMING_SNAKE_CASE : List[Any] = 2
SCREAMING_SNAKE_CASE : int = 8
SCREAMING_SNAKE_CASE : Optional[int] = {
"""semantic_prompt""": np.ones(lowerCamelCase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
SCREAMING_SNAKE_CASE : Tuple = processor(text=self.input_string , voice_preset=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = processor(text=self.input_string , voice_preset=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
SCREAMING_SNAKE_CASE : Optional[Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Any = BarkProcessor(tokenizer=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string )
SCREAMING_SNAKE_CASE : Tuple = tokenizer(
self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
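# Hedged note (not part of the test class): outside of tests, a voice-preset
# dict of numpy arrays can be persisted and restored the same way the npz
# round-trip above does it:
#   np.savez("voice.npz", **voice_preset)
#   restored = dict(np.load("voice.npz"))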
| 323 | 1 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__UpperCAmelCase = 0
__UpperCAmelCase = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__UpperCAmelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__UpperCAmelCase = tuple[int, int]
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Node | None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = pos_x
SCREAMING_SNAKE_CASE : Any = pos_y
SCREAMING_SNAKE_CASE : Optional[int] = (pos_y, pos_x)
SCREAMING_SNAKE_CASE : Tuple = goal_x
SCREAMING_SNAKE_CASE : List[str] = goal_y
SCREAMING_SNAKE_CASE : Optional[Any] = g_cost
SCREAMING_SNAKE_CASE : Tuple = parent
SCREAMING_SNAKE_CASE : int = self.calculate_heuristic()
SCREAMING_SNAKE_CASE : Tuple = self.g_cost + self.h_cost
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.pos_x - self.goal_x
SCREAMING_SNAKE_CASE : List[str] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowerCamelCase_ ) + abs(lowerCamelCase_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : Optional[Any] , lowerCamelCase_ : Node ):
'''simple docstring'''
return self.f_cost < other.f_cost
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : int , lowerCamelCase_ : TPosition , lowerCamelCase_ : TPosition ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = [self.start]
SCREAMING_SNAKE_CASE : list[Node] = []
SCREAMING_SNAKE_CASE : str = False
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
SCREAMING_SNAKE_CASE : Optional[Any] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowerCamelCase_ )
self.closed_nodes.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = self.get_successors(lowerCamelCase_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowerCamelCase_ )
else:
# retrieve the best current path
SCREAMING_SNAKE_CASE : int = self.open_nodes.pop(self.open_nodes.index(lowerCamelCase_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowerCamelCase_ )
else:
self.open_nodes.append(lowerCamelCase_ )
return [self.start.pos]
def lowerCamelCase_ ( self : int , lowerCamelCase_ : Node ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = []
for action in delta:
SCREAMING_SNAKE_CASE : Dict = parent.pos_x + action[1]
SCREAMING_SNAKE_CASE : List[str] = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowerCamelCase_ , lowerCamelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCamelCase_ , ) )
return successors
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Node | None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = node
SCREAMING_SNAKE_CASE : List[str] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
SCREAMING_SNAKE_CASE : Optional[Any] = current_node.parent
path.reverse()
return path
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : int , lowerCamelCase_ : TPosition , lowerCamelCase_ : TPosition ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = AStar(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = AStar(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = False
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
SCREAMING_SNAKE_CASE : List[str] = self.fwd_astar.open_nodes.pop(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowerCamelCase_ , lowerCamelCase_ )
self.fwd_astar.closed_nodes.append(lowerCamelCase_ )
self.bwd_astar.closed_nodes.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = current_bwd_node
SCREAMING_SNAKE_CASE : Any = current_fwd_node
SCREAMING_SNAKE_CASE : Dict = {
self.fwd_astar: self.fwd_astar.get_successors(lowerCamelCase_ ),
self.bwd_astar: self.bwd_astar.get_successors(lowerCamelCase_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowerCamelCase_ )
else:
# retrieve the best current path
SCREAMING_SNAKE_CASE : int = astar.open_nodes.pop(
astar.open_nodes.index(lowerCamelCase_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowerCamelCase_ )
else:
astar.open_nodes.append(lowerCamelCase_ )
return [self.fwd_astar.start.pos]
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Node , lowerCamelCase_ : Node ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.fwd_astar.retrace_path(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.bwd_astar.retrace_path(lowerCamelCase_ )
bwd_path.pop()
bwd_path.reverse()
SCREAMING_SNAKE_CASE : str = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__UpperCAmelCase = (0, 0)
__UpperCAmelCase = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__UpperCAmelCase = time.time()
__UpperCAmelCase = AStar(init, goal)
__UpperCAmelCase = a_star.search()
__UpperCAmelCase = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
__UpperCAmelCase = time.time()
    __UpperCAmelCase = BidirectionalAStar(init, goal)
    # actually run the bidirectional search so the timing below measures it
    # (instance name mirrors `a_star` above)
    __UpperCAmelCase = bd_a_star.search()
    __UpperCAmelCase = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
| 323 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCAmelCase = logging.getLogger(__name__)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return (preds == labels).mean()
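# Hedged worked example (not in the original script): with
# preds = np.array([1, 0, 1]) and labels = np.array([1, 1, 1]),
# (preds == labels).mean() evaluates to 2 / 3 ≈ 0.667.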
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def __A ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase_ )
# Set seed
set_seed(training_args.seed )
try:
SCREAMING_SNAKE_CASE : Dict = processors[data_args.task_name]()
SCREAMING_SNAKE_CASE : Optional[int] = processor.get_labels()
SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , )
# Get datasets
SCREAMING_SNAKE_CASE : Optional[Any] = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
SCREAMING_SNAKE_CASE : Dict = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(lowerCamelCase_ ) -> Dict:
SCREAMING_SNAKE_CASE : str = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(lowerCamelCase_ , p.label_ids )}
# Data collator
SCREAMING_SNAKE_CASE : List[Any] = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
SCREAMING_SNAKE_CASE : Any = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , compute_metrics=lowerCamelCase_ , data_collator=lowerCamelCase_ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE : Optional[Any] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
SCREAMING_SNAKE_CASE : Optional[Any] = trainer.evaluate()
SCREAMING_SNAKE_CASE : str = os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_master():
with open(lowerCamelCase_ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , lowerCamelCase_ , lowerCamelCase_ )
writer.write("""%s = %s\n""" % (key, value) )
results.update(lowerCamelCase_ )
return results
def __A ( lowerCamelCase_ ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
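# Minimal sketch (illustrative, not part of the original script): the
# argument-parsing pattern used in main() above. HfArgumentParser turns plain
# dataclasses into an argparse CLI and returns one populated instance per
# dataclass; the field names in this toy dataclass are assumptions.
from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class SketchArguments:
    model_name_or_path: str = field(metadata={"help": "Model id or local path."})
    max_seq_length: int = field(default=128, metadata={"help": "Truncation length."})

(sketch_args,) = HfArgumentParser(SketchArguments).parse_args_into_dataclasses(
    args=["--model_name_or_path", "bert-base-uncased"]
)
print(sketch_args.model_name_or_path, sketch_args.max_seq_length)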
| 323 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = """▁"""
__UpperCAmelCase = {"""vocab_file""": """spiece.model"""}
__UpperCAmelCase = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}
}
__UpperCAmelCase = {
"""google/pegasus-xsum""": 512,
}
__UpperCAmelCase = logging.get_logger(__name__)
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
    SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask''']
def __init__( self : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str="<pad>" , lowerCamelCase_ : Any="</s>" , lowerCamelCase_ : List[Any]="<unk>" , lowerCamelCase_ : List[Any]="<mask_2>" , lowerCamelCase_ : Tuple="<mask_1>" , lowerCamelCase_ : Dict=None , lowerCamelCase_ : int=1_03 , lowerCamelCase_ : Optional[Dict[str, Any]] = None , **lowerCamelCase_ : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = offset
if additional_special_tokens is not None:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise TypeError(
f'''additional_special_tokens should be of type {type(lowerCamelCase_ )}, but is'''
f''' {type(lowerCamelCase_ )}''' )
SCREAMING_SNAKE_CASE : List[Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(lowerCamelCase_ ) , self.offset - 1 )
]
if len(set(lowerCamelCase_ ) ) != len(lowerCamelCase_ ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
SCREAMING_SNAKE_CASE : str = additional_special_tokens_extended
else:
SCREAMING_SNAKE_CASE : List[Any] = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
SCREAMING_SNAKE_CASE : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token_sent=lowerCamelCase_ , offset=lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : str = mask_token_sent
SCREAMING_SNAKE_CASE : Dict = vocab_file
SCREAMING_SNAKE_CASE : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCamelCase_ )
# add special tokens to encoder dict
SCREAMING_SNAKE_CASE : Dict[int, str] = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
SCREAMING_SNAKE_CASE : Dict[str, int] = {v: k for k, v in self.encoder.items()}
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return len(self.sp_model ) + self.offset
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.__dict__.copy()
SCREAMING_SNAKE_CASE : str = None
return state
def __setstate__( self : Optional[int] , lowerCamelCase_ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
SCREAMING_SNAKE_CASE : int = {}
SCREAMING_SNAKE_CASE : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str ):
'''simple docstring'''
return self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_ )
def lowerCamelCase_ ( self : str , lowerCamelCase_ : str ):
'''simple docstring'''
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
SCREAMING_SNAKE_CASE : Any = self.sp_model.piece_to_id(lowerCamelCase_ )
return sp_id + self.offset
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : int ):
'''simple docstring'''
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
SCREAMING_SNAKE_CASE : Tuple = self.sp_model.IdToPiece(index - self.offset )
return token
def lowerCamelCase_ ( self : int , lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : Tuple = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCamelCase_ ) + token
SCREAMING_SNAKE_CASE : Union[str, Any] = []
else:
current_sub_tokens.append(lowerCamelCase_ )
out_string += self.sp_model.decode(lowerCamelCase_ )
return out_string.strip()
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int=False ):
'''simple docstring'''
return 1
def lowerCamelCase_ ( self : int , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List , lowerCamelCase_ : Optional[List] = None , lowerCamelCase_ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(lowerCamelCase_ )
elif token_ids_a is None:
return self._special_token_mask(lowerCamelCase_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int]=None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : str , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(
lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase_ , """wb""" ) as fi:
SCREAMING_SNAKE_CASE : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_ )
return (out_vocab_file,)
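# Minimal sketch (illustrative, not part of the tokenizer): the id-offset scheme
# used above, where the first `offset` ids are reserved for special tokens and
# every underlying SentencePiece id is shifted up by `offset`. A tiny dict
# stands in for the real spm model here.
class OffsetVocabSketch:
    def __init__(self, sp_pieces, specials):
        self.offset = len(specials)
        self.encoder = dict(enumerate(specials))              # id -> special token
        self.decoder = {tok: i for i, tok in self.encoder.items()}
        self._sp = {piece: i for i, piece in enumerate(sp_pieces)}  # fake spm
        self._sp_inv = {i: piece for piece, i in self._sp.items()}

    def token_to_id(self, token):
        if token in self.decoder:
            return self.decoder[token]
        return self._sp[token] + self.offset                  # shift spm ids up

    def id_to_token(self, index):
        if index in self.encoder:
            return self.encoder[index]
        return self._sp_inv[index - self.offset]              # shift back down

v = OffsetVocabSketch(["▁hello", "▁world"], ["<pad>", "</s>", "<mask_2>"])
assert v.token_to_id("<pad>") == 0
assert v.token_to_id("▁hello") == 3        # spm id 0 shifted by offset 3
assert v.id_to_token(4) == "▁world"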
| 323 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Any=("DownEncoderBlock2D",) , lowerCamelCase_ : List[Any]=(64,) , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : List[Any]="silu" , lowerCamelCase_ : Optional[int]=True , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[int] = layers_per_block
SCREAMING_SNAKE_CASE : int = torch.nn.Convad(
lowerCamelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList([] )
# down
SCREAMING_SNAKE_CASE : Tuple = block_out_channels[0]
for i, down_block_type in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Any = output_channel
SCREAMING_SNAKE_CASE : List[str] = block_out_channels[i]
SCREAMING_SNAKE_CASE : Union[str, Any] = i == len(lowerCamelCase_ ) - 1
SCREAMING_SNAKE_CASE : Optional[Any] = get_down_block(
lowerCamelCase_ , num_layers=self.layers_per_block , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
self.down_blocks.append(lowerCamelCase_ )
# mid
SCREAMING_SNAKE_CASE : Union[str, Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# out
SCREAMING_SNAKE_CASE : List[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCamelCase_ , eps=1e-6 )
SCREAMING_SNAKE_CASE : List[Any] = nn.SiLU()
SCREAMING_SNAKE_CASE : Dict = 2 * out_channels if double_z else out_channels
SCREAMING_SNAKE_CASE : List[Any] = nn.Convad(block_out_channels[-1] , lowerCamelCase_ , 3 , padding=1 )
SCREAMING_SNAKE_CASE : Tuple = False
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = x
SCREAMING_SNAKE_CASE : int = self.conv_in(lowerCamelCase_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase_ : List[Any] ):
def custom_forward(*lowerCamelCase_ : List[str] ):
return module(*lowerCamelCase_ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
else:
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCamelCase_ )
else:
# down
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : Tuple = down_block(lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : List[Any] = self.mid_block(lowerCamelCase_ )
# post-process
SCREAMING_SNAKE_CASE : Optional[Any] = self.conv_norm_out(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.conv_act(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.conv_out(lowerCamelCase_ )
return sample
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : str=("UpDecoderBlock2D",) , lowerCamelCase_ : Union[str, Any]=(64,) , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : Dict="silu" , lowerCamelCase_ : Any="group" , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : int = layers_per_block
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Convad(
lowerCamelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Any = nn.ModuleList([] )
SCREAMING_SNAKE_CASE : str = in_channels if norm_type == """spatial""" else None
# mid
SCREAMING_SNAKE_CASE : Dict = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# up
SCREAMING_SNAKE_CASE : Union[str, Any] = list(reversed(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Any = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = output_channel
SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i]
SCREAMING_SNAKE_CASE : List[str] = i == len(lowerCamelCase_ ) - 1
SCREAMING_SNAKE_CASE : List[Any] = get_up_block(
lowerCamelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , prev_output_channel=lowerCamelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , resnet_time_scale_shift=lowerCamelCase_ , )
self.up_blocks.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = output_channel
# out
if norm_type == "spatial":
SCREAMING_SNAKE_CASE : List[Any] = SpatialNorm(block_out_channels[0] , lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCamelCase_ , eps=1e-6 )
SCREAMING_SNAKE_CASE : Dict = nn.SiLU()
SCREAMING_SNAKE_CASE : str = nn.Convad(block_out_channels[0] , lowerCamelCase_ , 3 , padding=1 )
SCREAMING_SNAKE_CASE : Dict = False
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : str=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = z
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_in(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase_ : List[str] ):
def custom_forward(*lowerCamelCase_ : str ):
return module(*lowerCamelCase_ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
SCREAMING_SNAKE_CASE : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
else:
# middle
SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ )
else:
# middle
SCREAMING_SNAKE_CASE : Any = self.mid_block(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Any = up_block(lowerCamelCase_ , lowerCamelCase_ )
# post-process
if latent_embeds is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_norm_out(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_act(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.conv_out(lowerCamelCase_ )
return sample
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int=None , lowerCamelCase_ : Any="random" , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[Any]=True ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Tuple = n_e
SCREAMING_SNAKE_CASE : int = vq_embed_dim
SCREAMING_SNAKE_CASE : Tuple = beta
SCREAMING_SNAKE_CASE : Union[str, Any] = legacy
SCREAMING_SNAKE_CASE : int = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
SCREAMING_SNAKE_CASE : Optional[Any] = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
SCREAMING_SNAKE_CASE : Tuple = self.used.shape[0]
SCREAMING_SNAKE_CASE : Any = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
SCREAMING_SNAKE_CASE : Union[str, Any] = self.re_embed
SCREAMING_SNAKE_CASE : Any = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
SCREAMING_SNAKE_CASE : Optional[int] = n_e
SCREAMING_SNAKE_CASE : Any = sane_index_shape
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = inds.shape
assert len(lowerCamelCase_ ) > 1
SCREAMING_SNAKE_CASE : Tuple = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long()
SCREAMING_SNAKE_CASE : Union[str, Any] = match.argmax(-1 )
SCREAMING_SNAKE_CASE : Tuple = match.sum(2 ) < 1
if self.unknown_index == "random":
SCREAMING_SNAKE_CASE : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
SCREAMING_SNAKE_CASE : Any = self.unknown_index
return new.reshape(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = inds.shape
assert len(lowerCamelCase_ ) > 1
SCREAMING_SNAKE_CASE : str = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ )
if self.re_embed > self.used.shape[0]: # extra token
SCREAMING_SNAKE_CASE : List[Any] = 0 # simply set to zero
SCREAMING_SNAKE_CASE : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCamelCase_ )
return back.reshape(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous()
SCREAMING_SNAKE_CASE : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
SCREAMING_SNAKE_CASE : Any = torch.argmin(torch.cdist(lowerCamelCase_ , self.embedding.weight ) , dim=1 )
SCREAMING_SNAKE_CASE : Tuple = self.embedding(lowerCamelCase_ ).view(z.shape )
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : List[str] = None
# compute loss for embedding
if not self.legacy:
SCREAMING_SNAKE_CASE : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
SCREAMING_SNAKE_CASE : Tuple = z + (z_q - z).detach()
# reshape back to match original input shape
SCREAMING_SNAKE_CASE : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE : List[Any] = self.remap_to_used(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
if self.remap is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE : List[Any] = self.unmap_to_all(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
SCREAMING_SNAKE_CASE : str = self.embedding(lowerCamelCase_ )
if shape is not None:
SCREAMING_SNAKE_CASE : List[str] = z_q.view(lowerCamelCase_ )
# reshape back to match original input shape
SCREAMING_SNAKE_CASE : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int]=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parameters
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = torch.chunk(lowerCamelCase_ , 2 , dim=1 )
SCREAMING_SNAKE_CASE : List[str] = torch.clamp(self.logvar , -30.0 , 20.0 )
SCREAMING_SNAKE_CASE : Dict = deterministic
SCREAMING_SNAKE_CASE : int = torch.exp(0.5 * self.logvar )
SCREAMING_SNAKE_CASE : Tuple = torch.exp(self.logvar )
if self.deterministic:
SCREAMING_SNAKE_CASE : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[torch.Generator] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = randn_tensor(
self.mean.shape , generator=lowerCamelCase_ , device=self.parameters.device , dtype=self.parameters.dtype )
SCREAMING_SNAKE_CASE : Optional[Any] = self.mean + self.std * sample
return x
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int=None ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=[1, 2, 3] ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
SCREAMING_SNAKE_CASE : List[Any] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.mean
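# Minimal sketch (illustrative, not part of the module): the nearest-neighbour
# lookup inside the vector quantizer above. Each latent vector is replaced by
# its closest codebook row, and the commitment term compares the input against
# the quantized result; the toy shapes and data are assumptions.
import numpy as np

rng = np.random.default_rng(0)
codebook = rng.normal(size=(8, 4))   # n_e = 8 codes of dimension vq_embed_dim = 4
z = rng.normal(size=(5, 4))          # 5 flattened latent vectors

# squared distances (z - e)^2 = z^2 + e^2 - 2 z.e, as in the comment above
d = (z ** 2).sum(1, keepdims=True) + (codebook ** 2).sum(1) - 2 * z @ codebook.T
indices = d.argmin(axis=1)           # index of the closest code per latent
z_q = codebook[indices]

commitment = ((z_q - z) ** 2).mean()  # the beta-weighted term of the VQ loss
print(indices, float(commitment))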
| 323 | 1 |
'''simple docstring'''
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , *lowerCamelCase_ : str , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : Any=None , **lowerCamelCase_ : int ):
'''simple docstring'''
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = eval_examples
SCREAMING_SNAKE_CASE : Tuple = post_process_function
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any=None , lowerCamelCase_ : str=None , lowerCamelCase_ : Any=None , lowerCamelCase_ : str = "eval" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.eval_dataset if eval_dataset is None else eval_dataset
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_eval_dataloader(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE : Any = self.compute_metrics
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
SCREAMING_SNAKE_CASE : Any = time.time()
try:
SCREAMING_SNAKE_CASE : int = eval_loop(
lowerCamelCase_ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCamelCase_ , metric_key_prefix=lowerCamelCase_ , )
finally:
SCREAMING_SNAKE_CASE : int = compute_metrics
SCREAMING_SNAKE_CASE : List[Any] = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowerCamelCase_ , lowerCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
SCREAMING_SNAKE_CASE : Dict = self.post_process_function(lowerCamelCase_ , lowerCamelCase_ , output.predictions )
SCREAMING_SNAKE_CASE : Dict = self.compute_metrics(lowerCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
SCREAMING_SNAKE_CASE : List[str] = metrics.pop(lowerCamelCase_ )
metrics.update(output.metrics )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowerCamelCase_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
SCREAMING_SNAKE_CASE : Optional[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowerCamelCase_ )
return metrics
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : str = "test" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.get_test_dataloader(lowerCamelCase_ )
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE : int = self.compute_metrics
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
SCREAMING_SNAKE_CASE : Any = time.time()
try:
SCREAMING_SNAKE_CASE : int = eval_loop(
lowerCamelCase_ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCamelCase_ , metric_key_prefix=lowerCamelCase_ , )
finally:
SCREAMING_SNAKE_CASE : Tuple = compute_metrics
SCREAMING_SNAKE_CASE : int = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowerCamelCase_ , lowerCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
SCREAMING_SNAKE_CASE : str = self.post_process_function(lowerCamelCase_ , lowerCamelCase_ , output.predictions , """predict""" )
SCREAMING_SNAKE_CASE : Optional[int] = self.compute_metrics(lowerCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = metrics.pop(lowerCamelCase_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowerCamelCase_ )
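# Minimal sketch (illustrative, not part of the trainer): the metric-prefixing
# loop that both evaluate() and predict() apply above, so that eval and test
# results never collide under the same key.
def prefix_metrics(metrics, metric_key_prefix="eval"):
    for key in list(metrics.keys()):
        if not key.startswith(f"{metric_key_prefix}_"):
            metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
    return metrics

print(prefix_metrics({"f1": 88.2, "eval_loss": 0.31}))
# -> {'eval_loss': 0.31, 'eval_f1': 88.2}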
| 323 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaxAutoencoderKL
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : str = 3
SCREAMING_SNAKE_CASE : List[Any] = (32, 32)
SCREAMING_SNAKE_CASE : Tuple = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE : Any = jax.random.uniform(lowerCamelCase_ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_input
return init_dict, inputs_dict
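# Minimal sketch (illustrative, not part of the test): the explicit-PRNG
# pattern the dummy input above relies on. Flax/JAX code threads a key instead
# of using global random state, so the same key always reproduces the same
# sample; shapes mirror the dummy input.
import jax

key = jax.random.PRNGKey(0)
sample_a = jax.random.uniform(key, (4, 3, 32, 32))
sample_b = jax.random.uniform(key, (4, 3, 32, 32))
assert (sample_a == sample_b).all()       # same key -> identical tensor
fresh_a, fresh_b = jax.random.split(key)  # split for independent streams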
| 323 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict=13 , lowerCamelCase_ : Any=7 , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : Tuple=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : Union[str, Any]=99 , lowerCamelCase_ : Any=32 , lowerCamelCase_ : Union[str, Any]=5 , lowerCamelCase_ : Union[str, Any]=4 , lowerCamelCase_ : Tuple=37 , lowerCamelCase_ : Union[str, Any]="gelu" , lowerCamelCase_ : Dict=0.1 , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : Tuple=5_12 , lowerCamelCase_ : Any=16 , lowerCamelCase_ : Optional[int]=2 , lowerCamelCase_ : Union[str, Any]=0.02 , lowerCamelCase_ : str=4 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : Union[str, Any] = seq_length
SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
SCREAMING_SNAKE_CASE : int = use_attention_mask
SCREAMING_SNAKE_CASE : Dict = use_token_type_ids
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE : List[str] = hidden_size
SCREAMING_SNAKE_CASE : Any = num_hidden_layers
SCREAMING_SNAKE_CASE : str = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = num_choices
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : str = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : List[Any] = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=lowerCamelCase_ , )
return config, input_ids, attention_mask
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = config_and_inputs
SCREAMING_SNAKE_CASE : Dict = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = FlaxDistilBertModelTester(self )
@slow
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = model_class_name.from_pretrained("""distilbert-base-uncased""" )
SCREAMING_SNAKE_CASE : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase_ )
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
SCREAMING_SNAKE_CASE : List[str] = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
SCREAMING_SNAKE_CASE : Dict = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = (1, 11, 7_68)
self.assertEqual(output.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = np.array([[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCamelCase_ , atol=1e-4 ) )
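# Minimal sketch (illustrative, not part of the test): the slice-comparison
# idiom used in the integration test above. Instead of pinning every hidden
# state, a small slice is checked against stored reference values within an
# absolute tolerance; the arrays here are toy data.
import numpy as np

output = np.linspace(-0.2, 0.4, 2 * 4 * 5).reshape(2, 4, 5)
expected_slice = output[:, 1:4, 1:4] + 5e-5  # pretend these were saved earlier
assert np.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)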
| 323 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase__ :
"""simple docstring"""
@staticmethod
def lowerCamelCase_ ( *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
pass
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = DepthEstimationPipeline(model=lowerCamelCase_ , image_processor=lowerCamelCase_ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , lowerCamelCase_ )
import datasets
SCREAMING_SNAKE_CASE : List[str] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
SCREAMING_SNAKE_CASE : Any = depth_estimator(
[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
] )
self.assertEqual(
[
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
] , lowerCamelCase_ , )
@require_tf
@unittest.skip("""Depth estimation is not implemented in TF""" )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@slow
@require_torch
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = """Intel/dpt-large"""
SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline("""depth-estimation""" , model=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
SCREAMING_SNAKE_CASE : str = hashimage(outputs["""depth"""] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.662 )
@require_torch
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
        self.skipTest("""There is no hf-internal-testing tiny model for either GLPN or DPT""" )
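# Minimal sketch (illustrative, not part of the test file): the image-hashing
# helper defined near the top of this file. Hashing the raw pixel bytes gives a
# compact fingerprint for comparing pipeline outputs across runs; the
# solid-colour test image is an assumption.
import hashlib
from PIL import Image

def hashimage_sketch(image):
    return hashlib.md5(image.tobytes()).hexdigest()

img = Image.new("L", (4, 4), color=128)
assert hashimage_sketch(img) == hashimage_sketch(Image.new("L", (4, 4), color=128))
print(hashimage_sketch(img))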
| 323 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
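# Minimal sketch (illustrative, not part of the module): the lazy-import idea
# behind _LazyModule, written with only the standard library. Placing a
# module-level __getattr__ (PEP 562) in a package's __init__.py defers the real
# import until the attribute is first touched; the mapping below is a toy.
import importlib

_IMPORT_STRUCTURE_SKETCH = {"json_backend": "json", "math_backend": "math"}

def __getattr__(name):
    if name in _IMPORT_STRUCTURE_SKETCH:
        return importlib.import_module(_IMPORT_STRUCTURE_SKETCH[name])
    raise AttributeError(name)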
| 323 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=13 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Dict=2_24 , lowerCamelCase_ : List[Any]=30 , lowerCamelCase_ : Union[str, Any]=4_00 , lowerCamelCase_ : str=True , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 18}
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : Any = num_channels
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : Dict = min_resolution
SCREAMING_SNAKE_CASE : List[str] = max_resolution
SCREAMING_SNAKE_CASE : str = do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size
SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE : List[Any] = image_mean
SCREAMING_SNAKE_CASE : str = image_std
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ViTImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = EfficientFormerImageProcessorTester(self )
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : List[str] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Tuple = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
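# Minimal sketch (illustrative, not part of the tests): what the three input
# paths above verify end to end. Whether the input is a PIL image, a numpy
# array, or a torch tensor, the processor returns a normalized channels-first
# float batch; the mean/std constants here are assumptions.
import numpy as np
from PIL import Image

def to_pixel_values(image, mean=0.5, std=0.5):
    arr = np.asarray(image, dtype=np.float32) / 255.0  # HWC in [0, 1]
    arr = (arr - mean) / std                           # per-channel normalize
    return arr.transpose(2, 0, 1)[None]                # -> (1, C, H, W)

img = Image.new("RGB", (18, 18), color=(120, 60, 200))
print(to_pixel_values(img).shape)  # (1, 3, 18, 18)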
| 323 | 1 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__UpperCAmelCase = get_logger()
__UpperCAmelCase = None
class UpperCamelCase__ ( TensorFormatter[Mapping, '''jax.Array''', Mapping] ):
"""simple docstring"""
def __init__( self : Any , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : Union[str, Any]=None , **lowerCamelCase_ : int ):
'''simple docstring'''
super().__init__(features=lowerCamelCase_ )
import jax
from jaxlib.xla_client import Device
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(lowerCamelCase_ )}, as `jaxlib.xla_extension.Device` '''
"""is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
"""the device with `str()` to get its string identifier that will be internally mapped """
"""to the actual `jaxlib.xla_extension.Device`.""" )
SCREAMING_SNAKE_CASE : int = device if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
SCREAMING_SNAKE_CASE : List[str] = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
SCREAMING_SNAKE_CASE : str = str(jax.devices()[0] )
SCREAMING_SNAKE_CASE : Dict = jnp_array_kwargs
@staticmethod
def lowerCamelCase_ ( ):
'''simple docstring'''
import jax
return {str(lowerCamelCase_ ): device for device in jax.devices()}
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Dict ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and column:
if all(
isinstance(lowerCamelCase_ , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(lowerCamelCase_ , axis=0 )
return column
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Dict ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(lowerCamelCase_ , (str, bytes, type(lowerCamelCase_ )) ):
return value
elif isinstance(lowerCamelCase_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
if isinstance(lowerCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
SCREAMING_SNAKE_CASE : Tuple = {"""dtype""": jnp.intaa}
else:
SCREAMING_SNAKE_CASE : Any = {"""dtype""": jnp.intaa}
elif isinstance(lowerCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
SCREAMING_SNAKE_CASE : List[str] = {"""dtype""": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(lowerCamelCase_ , PIL.Image.Image ):
SCREAMING_SNAKE_CASE : int = np.asarray(lowerCamelCase_ )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
SCREAMING_SNAKE_CASE : List[Any] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(lowerCamelCase_ , **{**default_dtype, **self.jnp_array_kwargs} )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str ):
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(lowerCamelCase_ , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(lowerCamelCase_ , """__array__""" ) and not isinstance(lowerCamelCase_ , jax.Array ):
SCREAMING_SNAKE_CASE : Tuple = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(lowerCamelCase_ , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(lowerCamelCase_ ) for substruct in data_struct] )
elif isinstance(lowerCamelCase_ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(lowerCamelCase_ ) for substruct in data_struct] )
return self._tensorize(lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : dict ):
'''simple docstring'''
return map_nested(self._recursive_tensorize , lowerCamelCase_ , map_list=lowerCamelCase_ )
    def format_row(self, pa_table: pa.Table):
        '''simple docstring'''
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table):
        '''simple docstring'''
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table):
        '''simple docstring'''
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
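if __name__ == "__main__":
    # Hedged usage sketch (added; not part of the original module): this formatter
    # is what `datasets` dispatches to for `.with_format("jax")`. The exact plumbing
    # may differ across `datasets` versions.
    from datasets import Dataset

    ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
    row = ds[0]  # goes through format_row above
    print(type(row["x"]))  # a jax Array; int32 unless jax_enable_x64 is set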
| 323 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
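# Hedged sketch (added; an illustration of the idea, not the real `_LazyModule`):
# the mechanism is lazy attribute resolution on a module object -- submodules are
# imported only when one of their names is first accessed, keeping the top-level
# `import transformers.onnx` cheap.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    """Resolve attributes to submodule members on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # maps attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr: str):
        submodule = self._attr_to_module.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value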
| 323 | 1 |
'''simple docstring'''
from collections import defaultdict
def dfs(start):
    """simple docstring"""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    """simple docstring"""
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
print(len(cuts) - 1)
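    # Hedged check (added; not in the original): for the sample tree above, the
    # classic "Even Tree" answer is 2 -- cutting edges (1, 3) and (1, 6) leaves
    # components of even size. A quick structural sanity check on `tree`:
    def subtree_size(root, parent_node):
        return 1 + sum(subtree_size(v, root) for v in tree[root] if v != parent_node)

    assert subtree_size(1, 0) == n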
| 323 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 323 | 1 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def __A ( ):
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def __A ( ):
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def __A ( ):
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError):
http_head("""https://huggingface.co""" )
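# Hedged sketch (added; an illustration of the idea, not datasets' real
# implementation): `offline()` works by monkeypatching the network layer for the
# duration of the context. A minimal "connection fails" mode could look like this:
from contextlib import contextmanager
from unittest.mock import patch


@contextmanager
def fake_connection_fails():
    def raise_connection_error(self, method, url, *args, **kwargs):
        raise requests.exceptions.ConnectionError(f"Tried to reach {url} in offline mode")

    with patch("requests.Session.request", raise_connection_error):
        yield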
| 323 |
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    """simple docstring"""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """simple docstring"""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """simple docstring"""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """simple docstring"""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """simple docstring"""
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
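    # Hedged usage examples (added): with number = 0b1010 (decimal 10),
    # positions are counted from the least significant bit.
    assert set_bit(0b1010, 0) == 0b1011
    assert clear_bit(0b1010, 1) == 0b1000
    assert flip_bit(0b1010, 3) == 0b0010
    assert is_bit_set(0b1010, 3) is True
    assert get_bit(0b1010, 0) == 0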
| 323 | 1 |
'''simple docstring'''
__UpperCAmelCase = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
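# Hedged usage sketch (added; mirrors the setup.py pattern such pin tables are
# usually consumed with -- the helper name is an assumption, not from this file):
def deps_list(*pkgs):
    """Look up the pinned requirement strings for the given package names."""
    return [__UpperCAmelCase[pkg] for pkg in pkgs]


# e.g. an extras group: deps_list("pytest", "pytest-timeout", "pytest-xdist")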
| 323 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=14 , lowerCamelCase_ : Optional[Any]=7 , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=True , lowerCamelCase_ : str=False , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : int=99 , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : int=4 , lowerCamelCase_ : List[Any]=4 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : Union[str, Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : List[str]=5_12 , lowerCamelCase_ : Union[str, Any]=0.02 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : Optional[int] = batch_size
SCREAMING_SNAKE_CASE : Any = seq_length
SCREAMING_SNAKE_CASE : List[str] = is_training
SCREAMING_SNAKE_CASE : Optional[int] = use_input_mask
SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = rotary_dim
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Dict = vocab_size - 1
SCREAMING_SNAKE_CASE : str = vocab_size - 1
SCREAMING_SNAKE_CASE : List[Any] = vocab_size - 1
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : List[str] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = config_and_inputs
SCREAMING_SNAKE_CASE : Tuple = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = 20
SCREAMING_SNAKE_CASE : Any = model_class_name(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model.init_cache(input_ids.shape[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
SCREAMING_SNAKE_CASE : Optional[int] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE : Any = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE : str = model(
input_ids[:, -1:] , attention_mask=lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = 20
SCREAMING_SNAKE_CASE : Dict = model_class_name(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
SCREAMING_SNAKE_CASE : str = model.init_cache(input_ids.shape[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE : Any = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE : Dict = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class UpperCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
SCREAMING_SNAKE_CASE__ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxGPTJModelTester(self )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
@tooslow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : Optional[Any] = model.config.eos_token_id
SCREAMING_SNAKE_CASE : str = jax.jit(model.generate )
SCREAMING_SNAKE_CASE : str = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@is_pt_flax_cross_test
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Optional[int] = 1
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
SCREAMING_SNAKE_CASE : Optional[int] = pt_model_class(lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = fx_state
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = pt_model(**lowerCamelCase_ ).to_tuple()
SCREAMING_SNAKE_CASE : Any = fx_model(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model_class.from_pretrained(lowerCamelCase_ , from_pt=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = fx_model_loaded(**lowerCamelCase_ ).to_tuple()
self.assertEqual(
len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = pt_model_class(lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : List[Any] = load_flax_weights_in_pytorch_model(lowerCamelCase_ , fx_model.params )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
SCREAMING_SNAKE_CASE : Dict = 1
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : Tuple = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = pt_model(**lowerCamelCase_ ).to_tuple()
SCREAMING_SNAKE_CASE : Optional[Any] = fx_model(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = pt_model_class.from_pretrained(lowerCamelCase_ , from_flax=lowerCamelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = pt_model_loaded(**lowerCamelCase_ ).to_tuple()
self.assertEqual(
len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
SCREAMING_SNAKE_CASE : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase_ )
| 323 | 1 |
'''simple docstring'''
import argparse
__UpperCAmelCase = """docs/source/_static/js/custom.js"""
def __A ( lowerCamelCase_ ):
"""simple docstring"""
with open(lowerCamelCase_ , encoding="""utf-8""" , newline="""\n""" ) as f:
SCREAMING_SNAKE_CASE : int = f.readlines()
SCREAMING_SNAKE_CASE : List[Any] = 0
# First let's put the right version
while not lines[index].startswith("""const stableVersion =""" ):
index += 1
SCREAMING_SNAKE_CASE : str = f'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith("""const versionMapping = {""" ):
index += 1
# We go until the end
while not lines[index].startswith("""}""" ):
index += 1
# We add the new version at the end
lines[index - 1] += f''' "v{version}": "v{version}",\n'''
with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lowerCamelCase_ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--version""", help="""Release version.""")
__UpperCAmelCase = parser.parse_args()
update_custom_js(args.version)
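# Hedged usage example (added; the script filename and version number are
# illustrative only -- just the --version flag is defined above). Run from the
# repository root so the relative CUSTOM_JS_FILE path resolves:
#
#     python update_custom_js.py --version 0.21.0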
| 323 |
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__ ( lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [R'''h\.\d+\.attn\.bias''', R'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : int = 5_02_57 , lowerCamelCase_ : int = 10_24 , lowerCamelCase_ : int = 7_68 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : str = "gelu_new" , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 1e-5 , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[int] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'''
f''' `n_embd`: {n_embd} are not equal.''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = prefix_inner_dim
SCREAMING_SNAKE_CASE : List[str] = prefix_hidden_dim
SCREAMING_SNAKE_CASE : Tuple = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
SCREAMING_SNAKE_CASE : str = (
nn.Linear(self.prefix_hidden_dim , lowerCamelCase_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
SCREAMING_SNAKE_CASE : Any = GPTaConfig(
vocab_size=lowerCamelCase_ , n_positions=lowerCamelCase_ , n_embd=lowerCamelCase_ , n_layer=lowerCamelCase_ , n_head=lowerCamelCase_ , n_inner=lowerCamelCase_ , activation_function=lowerCamelCase_ , resid_pdrop=lowerCamelCase_ , embd_pdrop=lowerCamelCase_ , attn_pdrop=lowerCamelCase_ , layer_norm_epsilon=lowerCamelCase_ , initializer_range=lowerCamelCase_ , scale_attn_weights=lowerCamelCase_ , use_cache=lowerCamelCase_ , scale_attn_by_inverse_layer_idx=lowerCamelCase_ , reorder_and_upcast_attn=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaLMHeadModel(lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : Optional[torch.Tensor] = None , lowerCamelCase_ : Optional[torch.Tensor] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.transformer.transformer.wte(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.encode_prefix(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.decode_prefix(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
SCREAMING_SNAKE_CASE : Dict = torch.cat((dummy_token, input_ids) , dim=1 )
SCREAMING_SNAKE_CASE : str = self.transformer(inputs_embeds=lowerCamelCase_ , labels=lowerCamelCase_ , attention_mask=lowerCamelCase_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        '''simple docstring'''
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        '''simple docstring'''
        return self.encode_prefix(prefix)
@torch.no_grad()
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = torch.split(lowerCamelCase_ , 1 , dim=0 )
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : Tuple = []
for feature in features:
SCREAMING_SNAKE_CASE : Optional[int] = self.decode_prefix(feature.to(lowerCamelCase_ ) ) # back to the clip feature
# Only support beam search for now
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.generate_beam(
input_embeds=lowerCamelCase_ , device=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = torch.stack(lowerCamelCase_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : int=None , lowerCamelCase_ : int = 5 , lowerCamelCase_ : int = 67 , lowerCamelCase_ : float = 1.0 , lowerCamelCase_ : Optional[int] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = eos_token_id
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.int )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.bool )
if input_embeds is not None:
SCREAMING_SNAKE_CASE : Dict = input_embeds
else:
SCREAMING_SNAKE_CASE : Dict = self.transformer.transformer.wte(lowerCamelCase_ )
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[int] = self.transformer(inputs_embeds=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = outputs.logits
SCREAMING_SNAKE_CASE : Optional[int] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
SCREAMING_SNAKE_CASE : Any = logits.softmax(-1 ).log()
if scores is None:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = logits.topk(lowerCamelCase_ , -1 )
SCREAMING_SNAKE_CASE : Optional[Any] = generated.expand(lowerCamelCase_ , *generated.shape[1:] )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
SCREAMING_SNAKE_CASE : List[Any] = next_tokens
else:
SCREAMING_SNAKE_CASE : Dict = tokens.expand(lowerCamelCase_ , *tokens.shape[1:] )
SCREAMING_SNAKE_CASE : str = torch.cat((tokens, next_tokens) , dim=1 )
else:
SCREAMING_SNAKE_CASE : Tuple = -float(np.inf )
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Dict = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
SCREAMING_SNAKE_CASE : List[str] = scores_sum / seq_lengths[:, None]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = scores_sum_average.view(-1 ).topk(lowerCamelCase_ , -1 )
SCREAMING_SNAKE_CASE : str = next_tokens // scores_sum.shape[1]
SCREAMING_SNAKE_CASE : Tuple = seq_lengths[next_tokens_source]
SCREAMING_SNAKE_CASE : int = next_tokens % scores_sum.shape[1]
SCREAMING_SNAKE_CASE : Dict = next_tokens.unsqueeze(1 )
SCREAMING_SNAKE_CASE : Dict = tokens[next_tokens_source]
SCREAMING_SNAKE_CASE : Any = torch.cat((tokens, next_tokens) , dim=1 )
SCREAMING_SNAKE_CASE : List[str] = generated[next_tokens_source]
SCREAMING_SNAKE_CASE : Optional[Any] = scores_sum_average * seq_lengths
SCREAMING_SNAKE_CASE : Any = is_stopped[next_tokens_source]
SCREAMING_SNAKE_CASE : Dict = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
SCREAMING_SNAKE_CASE : str = torch.cat((generated, next_token_embed) , dim=1 )
SCREAMING_SNAKE_CASE : Dict = is_stopped + next_tokens.eq(lowerCamelCase_ ).squeeze()
if is_stopped.all():
break
SCREAMING_SNAKE_CASE : int = scores / seq_lengths
SCREAMING_SNAKE_CASE : Dict = scores.argsort(descending=lowerCamelCase_ )
# tokens tensors are already padded to max_seq_length
SCREAMING_SNAKE_CASE : Union[str, Any] = [tokens[i] for i in order]
SCREAMING_SNAKE_CASE : Dict = torch.stack(lowerCamelCase_ , dim=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
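# Hedged, self-contained sketch (added; not part of the class above): the heart of
# the beam loop is tracking a summed log-prob and a length per beam and ranking
# (beam, token) candidates by average log-prob, as in this standalone helper:
import torch


def top_beam_candidates(log_probs, scores_sum, seq_lengths, beam_size):
    """Pick the `beam_size` best (beam, token) pairs by length-normalized score."""
    # log_probs: (beams, vocab) next-token log-probs; scores_sum: (beams,) summed
    # log-probs so far; seq_lengths: (beams,) current sequence lengths.
    candidate_sums = scores_sum[:, None] + log_probs              # (beams, vocab)
    candidate_avg = candidate_sums / (seq_lengths[:, None] + 1)   # normalize by new length
    _, flat_idx = candidate_avg.view(-1).topk(beam_size)
    beam_idx = torch.div(flat_idx, log_probs.shape[1], rounding_mode="floor")
    token_idx = flat_idx % log_probs.shape[1]
    return beam_idx, token_idx, candidate_sums.view(-1)[flat_idx]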
| 323 | 1 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    """simple docstring"""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, """html.parser""")
    div = soup.find("""div""", attrs={"""class""": """gs_ri"""})
    anchors = div.find("""div""", attrs={"""class""": """gs_fl"""}).find_all("""a""")
    return anchors[2].get_text()
if __name__ == "__main__":
__UpperCAmelCase = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 30,
"""pages""": """3979-3990""",
"""year""": 2018,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
| 323 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class GitVisionConfig(PretrainedConfig):
    """simple docstring"""

    model_type = """git_vision_model"""

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="""quick_gelu""",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("""model_type""") == "git":
            config_dict = config_dict["""vision_config"""]

        if "model_type" in config_dict and hasattr(cls, """model_type""") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.'''
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    """simple docstring"""

    model_type = """git"""

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="""gelu""",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="""absolute""",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
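if __name__ == "__main__":
    # Hedged usage sketch (added; not part of the original module): nested vision
    # settings can be overridden through the plain-dict form consumed above.
    cfg = GitConfig(vision_config={"""image_size""": 384}, num_hidden_layers=6)
    assert cfg.vision_config.image_size == 384
    assert cfg.to_dict()["""vision_config"""]["""image_size"""] == 384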
| 323 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 323 |
'''simple docstring'''
from manim import *
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE : List[str] = Rectangle(height=0.25 , width=0.25 )
SCREAMING_SNAKE_CASE : Optional[int] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : List[Any] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : str = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Tuple = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : List[Any] = Text("""CPU""" , font_size=24 )
SCREAMING_SNAKE_CASE : Any = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = Text("""GPU""" , font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : List[Any] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Text("""Model""" , font_size=24 )
SCREAMING_SNAKE_CASE : List[str] = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for i, rect in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = fill.copy().set_fill(lowerCamelCase_ , opacity=0.8 )
target.move_to(lowerCamelCase_ )
model_arr.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(lowerCamelCase_ )
self.add(*lowerCamelCase_ , *lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Tuple = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Tuple = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Optional[int] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Dict = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : List[Any] = Text("""Disk""" , font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
disk.move_to([-4, -1.25, 0] )
self.add(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE : Optional[Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(lowerCamelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = MarkupText(
f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = Square(0.3 )
input.set_fill(lowerCamelCase_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , lowerCamelCase_ , buff=0.5 )
self.play(Write(lowerCamelCase_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=lowerCamelCase_ , buff=0.02 )
self.play(MoveToTarget(lowerCamelCase_ ) )
self.play(FadeOut(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : int = Arrow(start=lowerCamelCase_ , end=lowerCamelCase_ , color=lowerCamelCase_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , lowerCamelCase_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
SCREAMING_SNAKE_CASE : Optional[int] = MarkupText(
f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=3 ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02}
self.play(
Write(lowerCamelCase_ ) , Circumscribe(model_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_cpu_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
SCREAMING_SNAKE_CASE : Optional[int] = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , lowerCamelCase_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
SCREAMING_SNAKE_CASE : Any = AnimationGroup(
FadeOut(lowerCamelCase_ , run_time=0.5 ) , MoveToTarget(lowerCamelCase_ , run_time=0.5 ) , FadeIn(lowerCamelCase_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(lowerCamelCase_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
SCREAMING_SNAKE_CASE : Optional[Any] = 0.7
self.play(
Circumscribe(model_arr[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_arr[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = a_c
SCREAMING_SNAKE_CASE : Optional[Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(lowerCamelCase_ ) , FadeOut(lowerCamelCase_ , run_time=0.5 ) , )
SCREAMING_SNAKE_CASE : int = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=3 ) , MoveToTarget(lowerCamelCase_ ) )
self.wait()
| 323 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """simple docstring"""
    config = LxmertConfig.from_json_file(config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
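# Hedged usage example (added; all file paths are illustrative only):
#
#     python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./lxmert_tf/model.ckpt \
#         --config_file ./lxmert_tf/config.json \
#         --pytorch_dump_path ./lxmert_pt/pytorch_model.bin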
| 323 |
'''simple docstring'''
from __future__ import annotations
__UpperCAmelCase = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    """simple docstring"""

    def __init__(self, graph: dict[str, list[str]], source_vertex: str):
        '''simple docstring'''
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self):
        '''simple docstring'''
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        '''simple docstring'''
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f'''->{target_vertex}'''


if __name__ == "__main__":
    g = Graph(graph, """G""")
    g.breadth_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
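    # Hedged note (added): with source vertex "G" the two reachable queries print
    #   G->C->A->B->D   and   G
    # while "Foo" is not in the graph, so the last call raises ValueError. A
    # guarded variant would be:
    #
    #     try:
    #         print(g.shortest_path("Foo"))
    #     except ValueError as err:
    #         print(err)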
| 323 | 1 |
'''simple docstring'''
def sum_of_divisors(input_num: int) -> int:
    """simple docstring"""
    if not isinstance(input_num, int):
        raise ValueError("""Input must be an integer""")
    if input_num <= 0:
        raise ValueError("""Input must be positive""")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
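    # Hedged usage examples (added): the aliquot sum equals the number itself
    # exactly for perfect numbers.
    assert sum_of_divisors(6) == 6      # 1 + 2 + 3
    assert sum_of_divisors(28) == 28    # 1 + 2 + 4 + 7 + 14
    assert sum_of_divisors(12) == 16    # 1 + 2 + 3 + 4 + 6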
| 323 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__UpperCAmelCase = 0
__UpperCAmelCase = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__UpperCAmelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__UpperCAmelCase = tuple[int, int]
class Node:
    """simple docstring"""

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ):
        '''simple docstring'''
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        '''simple docstring'''
        dy = self.pos_x - self.goal_x
        dx = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dy) + abs(dx)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        '''simple docstring'''
        return self.f_cost < other.f_cost
class AStar:
    """simple docstring"""

    def __init__(self, start: TPosition, goal: TPosition):
        '''simple docstring'''
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        '''simple docstring'''
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        '''simple docstring'''
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        '''simple docstring'''
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """simple docstring"""

    def __init__(self, start: TPosition, goal: TPosition):
        '''simple docstring'''
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        '''simple docstring'''
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        '''simple docstring'''
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__UpperCAmelCase = (0, 0)
__UpperCAmelCase = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f'''AStar execution time = {end_time:f} seconds''')

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
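    # Hedged sanity check (added): every cell on the returned path must be free.
    for y, x in path:
        assert grid[y][x] == 0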
| 323 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    """simple docstring"""
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = """pixelshuffle_aux"""
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = """pixelshuffledirect"""
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = """nearest+conv"""
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = """"""

    return config
def rename_key(name, config):
"""simple docstring"""
if "patch_embed.proj" in name and "layers" not in name:
SCREAMING_SNAKE_CASE : List[str] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" )
if "layers" in name:
SCREAMING_SNAKE_CASE : Any = name.replace("""layers""" , """encoder.stages""" )
if "residual_group.blocks" in name:
SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""residual_group.blocks""" , """layers""" )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE : List[str] = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
SCREAMING_SNAKE_CASE : int = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE : List[Any] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE : Dict = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""mlp.fc2""" , """output.dense""" )
if "q_bias" in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace("""q_bias""" , """query.bias""" )
if "k_bias" in name:
SCREAMING_SNAKE_CASE : Any = name.replace("""k_bias""" , """key.bias""" )
if "v_bias" in name:
SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""v_bias""" , """value.bias""" )
if "cpb_mlp" in name:
SCREAMING_SNAKE_CASE : List[str] = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""patch_embed.proj""" , """patch_embed.projection""" )
if name == "norm.weight":
SCREAMING_SNAKE_CASE : Optional[int] = """layernorm.weight"""
if name == "norm.bias":
SCREAMING_SNAKE_CASE : Optional[int] = """layernorm.bias"""
if "conv_first" in name:
SCREAMING_SNAKE_CASE : int = name.replace("""conv_first""" , """first_convolution""" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
SCREAMING_SNAKE_CASE : Dict = name.replace("""conv_last""" , """final_convolution""" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
SCREAMING_SNAKE_CASE : Dict = name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" )
if "upsample.0" in name:
SCREAMING_SNAKE_CASE : Any = name.replace("""upsample.0""" , """upsample.convolution_0""" )
if "upsample.2" in name:
SCREAMING_SNAKE_CASE : Dict = name.replace("""upsample.2""" , """upsample.convolution_1""" )
SCREAMING_SNAKE_CASE : List[Any] = """upsample.""" + name
elif config.upsampler == "pixelshuffledirect":
SCREAMING_SNAKE_CASE : Optional[int] = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" )
SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" )
else:
pass
else:
SCREAMING_SNAKE_CASE : int = """swin2sr.""" + name
return name
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE : List[str] = orig_state_dict.pop(lowerCamelCase_ )
if "qkv" in key:
SCREAMING_SNAKE_CASE : Any = key.split(""".""" )
SCREAMING_SNAKE_CASE : Optional[int] = int(key_split[1] )
SCREAMING_SNAKE_CASE : Any = int(key_split[4] )
SCREAMING_SNAKE_CASE : Dict = config.embed_dim
if "weight" in key:
SCREAMING_SNAKE_CASE : List[Any] = val[:dim, :]
SCREAMING_SNAKE_CASE : int = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE : Tuple = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE : List[Any] = val[:dim]
SCREAMING_SNAKE_CASE : Any = val[dim : dim * 2]
SCREAMING_SNAKE_CASE : Any = val[-dim:]
pass
else:
SCREAMING_SNAKE_CASE : Optional[int] = val
return orig_state_dict
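# Hedged sketch of the qkv split performed above (shapes are illustrative, not
# taken from a real checkpoint): a fused projection of shape (3*dim, dim) is cut
# row-wise into equal query/key/value blocks, exactly as the slicing above does.
#
#   import torch
#   dim = 4
#   fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
#   q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
#   assert q.shape == k.shape == v.shape == (dim, dim)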
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = get_config(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = SwinaSRForImageSuperResolution(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Any = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="""cpu""" )
SCREAMING_SNAKE_CASE : Tuple = convert_state_dict(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
raise ValueError("""Missing keys when converting: {}""".format(lowerCamelCase_ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f'''Unexpected key {key} in state_dict''' )
# verify values
SCREAMING_SNAKE_CASE : Optional[Any] = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
SCREAMING_SNAKE_CASE : Optional[Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("""RGB""" )
SCREAMING_SNAKE_CASE : Dict = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
SCREAMING_SNAKE_CASE : str = 1_26 if """Jpeg""" in checkpoint_url else 2_56
SCREAMING_SNAKE_CASE : Union[str, Any] = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
SCREAMING_SNAKE_CASE : List[str] = transforms(lowerCamelCase_ ).unsqueeze(0 )
if config.num_channels == 1:
SCREAMING_SNAKE_CASE : List[str] = pixel_values[:, 0, :, :].unsqueeze(1 )
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
SCREAMING_SNAKE_CASE : Tuple = torch.Size([1, 3, 5_12, 5_12] )
SCREAMING_SNAKE_CASE : int = torch.tensor(
[[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
SCREAMING_SNAKE_CASE : Tuple = torch.Size([1, 3, 10_24, 10_24] )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(
[[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 10_24, 10_24] )
SCREAMING_SNAKE_CASE : int = torch.tensor(
[[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 5_12, 5_12] )
SCREAMING_SNAKE_CASE : Dict = torch.tensor(
[[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
SCREAMING_SNAKE_CASE : Dict = torch.Size([1, 3, 10_24, 10_24] )
SCREAMING_SNAKE_CASE : int = torch.tensor(
[[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
assert (
outputs.reconstruction.shape == expected_shape
), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowerCamelCase_ , atol=1E-3 )
print("""Looks ok!""" )
SCREAMING_SNAKE_CASE : Any = {
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
"""swin2SR-classical-sr-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
"""swin2SR-classical-sr-x4-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
"""swin2SR-compressed-sr-x4-48"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
"""swin2SR-lightweight-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
"""swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
),
}
SCREAMING_SNAKE_CASE : Tuple = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase_ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowerCamelCase_ )
if push_to_hub:
model.push_to_hub(f'''caidas/{model_name}''' )
processor.push_to_hub(f'''caidas/{model_name}''' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
__UpperCAmelCase = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 323 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''efficientnet'''
def __init__( self : Tuple , lowerCamelCase_ : int = 3 , lowerCamelCase_ : int = 6_00 , lowerCamelCase_ : float = 2.0 , lowerCamelCase_ : float = 3.1 , lowerCamelCase_ : int = 8 , lowerCamelCase_ : List[int] = [3, 3, 5, 3, 5, 5, 3] , lowerCamelCase_ : List[int] = [32, 16, 24, 40, 80, 1_12, 1_92] , lowerCamelCase_ : List[int] = [16, 24, 40, 80, 1_12, 1_92, 3_20] , lowerCamelCase_ : List[int] = [] , lowerCamelCase_ : List[int] = [1, 2, 2, 2, 1, 2, 1] , lowerCamelCase_ : List[int] = [1, 2, 2, 3, 3, 4, 1] , lowerCamelCase_ : List[int] = [1, 6, 6, 6, 6, 6, 6] , lowerCamelCase_ : float = 0.25 , lowerCamelCase_ : str = "swish" , lowerCamelCase_ : int = 25_60 , lowerCamelCase_ : str = "mean" , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : float = 0.001 , lowerCamelCase_ : float = 0.99 , lowerCamelCase_ : float = 0.5 , lowerCamelCase_ : float = 0.2 , **lowerCamelCase_ : int , ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : int = width_coefficient
SCREAMING_SNAKE_CASE : List[str] = depth_coefficient
SCREAMING_SNAKE_CASE : Optional[Any] = depth_divisor
SCREAMING_SNAKE_CASE : List[str] = kernel_sizes
SCREAMING_SNAKE_CASE : Dict = in_channels
SCREAMING_SNAKE_CASE : List[str] = out_channels
SCREAMING_SNAKE_CASE : Any = depthwise_padding
SCREAMING_SNAKE_CASE : Dict = strides
SCREAMING_SNAKE_CASE : Optional[Any] = num_block_repeats
SCREAMING_SNAKE_CASE : Any = expand_ratios
SCREAMING_SNAKE_CASE : Union[str, Any] = squeeze_expansion_ratio
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dim
SCREAMING_SNAKE_CASE : List[str] = pooling_type
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Any = batch_norm_eps
SCREAMING_SNAKE_CASE : Union[str, Any] = batch_norm_momentum
SCREAMING_SNAKE_CASE : Dict = dropout_rate
SCREAMING_SNAKE_CASE : int = drop_connect_rate
SCREAMING_SNAKE_CASE : Optional[Any] = sum(lowerCamelCase_ ) * 4
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = version.parse('''1.11''' )
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return 1e-5
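# In the unobfuscated transformers source these two properties are `inputs` (the
# dynamic-axes mapping for the ONNX export) and `atol_for_validation` (the 1e-5
# absolute tolerance used when comparing ONNX outputs against PyTorch). A hedged
# numeric check of what that tolerance admits:
#
#   import numpy as np
#   assert np.allclose(np.float32(1.0), np.float32(1.0 + 5e-6), atol=1e-5)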
| 323 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {
"""configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""],
"""tokenization_biogpt""": ["""BioGptTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BioGptForCausalLM""",
"""BioGptForTokenClassification""",
"""BioGptForSequenceClassification""",
"""BioGptModel""",
"""BioGptPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 323 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(lowercase_ )
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Tuple , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : Tuple ):
'''simple docstring'''
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Optional[int]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = {}
SCREAMING_SNAKE_CASE : List[Any] = {}
if prompt is not None:
SCREAMING_SNAKE_CASE : List[Any] = prompt
if generate_kwargs is not None:
SCREAMING_SNAKE_CASE : Optional[int] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
SCREAMING_SNAKE_CASE : Optional[Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Optional[Any] , lowerCamelCase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCamelCase_ : Any ):
'''simple docstring'''
return super().__call__(lowerCamelCase_ , **lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = load_image(lowerCamelCase_ )
if prompt is not None:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError(
f'''Received an invalid text input, got - {type(lowerCamelCase_ )} - but expected a single string. '''
"""Note also that one single text can be provided for conditional image to text generation.""" )
SCREAMING_SNAKE_CASE : Optional[int] = self.model.config.model_type
if model_type == "git":
SCREAMING_SNAKE_CASE : Dict = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE : str = self.tokenizer(text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ).input_ids
SCREAMING_SNAKE_CASE : Optional[int] = [self.tokenizer.cls_token_id] + input_ids
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
SCREAMING_SNAKE_CASE : int = self.image_processor(images=lowerCamelCase_ , header_text=lowerCamelCase_ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework )
model_inputs.update(lowerCamelCase_ )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
SCREAMING_SNAKE_CASE : Any = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
SCREAMING_SNAKE_CASE : Optional[Any] = None
return model_inputs
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any]=None ):
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , lowerCamelCase_ )
and all(x is None for x in model_inputs["""input_ids"""] )
):
SCREAMING_SNAKE_CASE : List[str] = None
if generate_kwargs is None:
SCREAMING_SNAKE_CASE : int = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
SCREAMING_SNAKE_CASE : Tuple = model_inputs.pop(self.model.main_input_name )
SCREAMING_SNAKE_CASE : Any = self.model.generate(lowerCamelCase_ , **lowerCamelCase_ , **lowerCamelCase_ )
return model_outputs
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for output_ids in model_outputs:
SCREAMING_SNAKE_CASE : List[Any] = {
"""generated_text""": self.tokenizer.decode(
lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , )
}
records.append(lowerCamelCase_ )
return records
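# Hedged usage sketch (the `pipeline` factory is the standard transformers entry
# point; the image URL is an illustrative placeholder):
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text")
#   print(captioner("https://example.com/cat.jpg"))
#   # -> [{'generated_text': '...'}]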
| 323 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 42
def __init__( self : Union[str, Any] , lowerCamelCase_ : UNetaDModel , lowerCamelCase_ : ScoreSdeVeScheduler ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__( self : str , lowerCamelCase_ : int = 1 , lowerCamelCase_ : int = 20_00 , lowerCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase_ : Optional[str] = "pil" , lowerCamelCase_ : bool = True , **lowerCamelCase_ : List[str] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.unet.config.sample_size
SCREAMING_SNAKE_CASE : Union[str, Any] = (batch_size, 3, img_size, img_size)
SCREAMING_SNAKE_CASE : List[Any] = self.unet
SCREAMING_SNAKE_CASE : Any = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ ) * self.scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(self.device )
self.scheduler.set_timesteps(lowerCamelCase_ )
self.scheduler.set_sigmas(lowerCamelCase_ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
SCREAMING_SNAKE_CASE : List[str] = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
SCREAMING_SNAKE_CASE : Dict = self.unet(lowerCamelCase_ , lowerCamelCase_ ).sample
SCREAMING_SNAKE_CASE : str = self.scheduler.step_correct(lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
# prediction step
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ , lowerCamelCase_ ).sample
SCREAMING_SNAKE_CASE : List[str] = self.scheduler.step_pred(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = output.prev_sample, output.prev_sample_mean
SCREAMING_SNAKE_CASE : str = sample_mean.clamp(0 , 1 )
SCREAMING_SNAKE_CASE : Any = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE : Union[str, Any] = self.numpy_to_pil(lowerCamelCase_ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=lowerCamelCase_ )
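# Hedged usage sketch of the predictor-corrector loop above (the model id is
# illustrative; DiffusionPipeline.from_pretrained is the standard loader):
#
#   pipe = DiffusionPipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(num_inference_steps=2000).images[0]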
| 323 |
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (CMStochasticIterativeScheduler,)
SCREAMING_SNAKE_CASE__ = 10
def lowerCamelCase_ ( self : List[str] , **lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = {
"""num_train_timesteps""": 2_01,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
config.update(**lowerCamelCase_ )
return config
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = 10
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0](**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = scheduler.timesteps[0]
SCREAMING_SNAKE_CASE : Dict = scheduler.timesteps[1]
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample
SCREAMING_SNAKE_CASE : List[str] = 0.1 * sample
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = 1
scheduler.set_timesteps(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = scheduler.timesteps
SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(lowerCamelCase_ ):
# 1. scale model input
SCREAMING_SNAKE_CASE : Optional[int] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict noise residual
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , lowerCamelCase_ )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = pred_prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 192.7_614 ) < 1e-2
assert abs(result_mean.item() - 0.2_510 ) < 1e-3
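    # Hedged note on the loop above: consistency-model sampling scales the input
    # by c_in = 1 / sqrt(sigma**2 + sigma_data**2) (sigma_data defaults to 0.5),
    # predicts, then steps. Toy check of the scaling factor (values illustrative):
    #
    #   sigma = 80.0
    #   c_in = 1.0 / (sigma**2 + 0.5**2) ** 0.5   # ~0.0125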
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = [1_06, 0]
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = scheduler.timesteps
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict noise residual
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lowerCamelCase_ )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE : str = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Dict = pred_prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 347.6_357 ) < 1e-2
assert abs(result_mean.item() - 0.4_527 ) < 1e-3
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [39, 30, 12, 15, 0]
with self.assertRaises(lowerCamelCase_ , msg="""`timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = [39, 30, 12, 1, 0]
SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ )
with self.assertRaises(lowerCamelCase_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ , timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Any = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            lowerCamelCase_ , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
| 323 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase_ : Optional[NestedDataStructureLike[PathLike]] = None , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Optional[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = path_or_paths
SCREAMING_SNAKE_CASE : List[Any] = split if split or isinstance(lowerCamelCase_ , lowerCamelCase_ ) else """train"""
SCREAMING_SNAKE_CASE : List[Any] = features
SCREAMING_SNAKE_CASE : Union[str, Any] = cache_dir
SCREAMING_SNAKE_CASE : Dict = keep_in_memory
SCREAMING_SNAKE_CASE : Union[str, Any] = streaming
SCREAMING_SNAKE_CASE : Optional[int] = num_proc
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs
@abstractmethod
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
pass
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : str , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Tuple , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = features
SCREAMING_SNAKE_CASE : int = cache_dir
SCREAMING_SNAKE_CASE : Dict = keep_in_memory
SCREAMING_SNAKE_CASE : Tuple = streaming
SCREAMING_SNAKE_CASE : Union[str, Any] = num_proc
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs
@abstractmethod
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
pass
| 323 |
'''simple docstring'''
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : NestedDataStructureLike[PathLike] , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
lowerCamelCase_ , split=lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_ , streaming=lowerCamelCase_ , num_proc=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : int = path_or_paths if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else {self.split: path_or_paths}
SCREAMING_SNAKE_CASE : Optional[int] = Text(
cache_dir=lowerCamelCase_ , data_files=lowerCamelCase_ , features=lowerCamelCase_ , **lowerCamelCase_ , )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.streaming:
SCREAMING_SNAKE_CASE : int = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[str] = None
self.builder.download_and_prepare(
download_config=lowerCamelCase_ , download_mode=lowerCamelCase_ , verification_mode=lowerCamelCase_ , base_path=lowerCamelCase_ , num_proc=self.num_proc , )
SCREAMING_SNAKE_CASE : int = self.builder.as_dataset(
split=self.split , verification_mode=lowerCamelCase_ , in_memory=self.keep_in_memory )
return dataset
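# Hedged usage sketch (in the unobfuscated datasets source this class is
# `TextDatasetReader` and the method above is `read`; the path is illustrative):
#
#   ds = TextDatasetReader("my_corpus.txt", split="train").read()
#   print(ds[0]["text"])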
| 323 | 1 |
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __A ( lowerCamelCase_ , lowerCamelCase_=() , lowerCamelCase_=None , lowerCamelCase_="no" , lowerCamelCase_="29500" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : Any = False
if any(key.startswith("""KAGGLE""" ) for key in os.environ.keys() ):
SCREAMING_SNAKE_CASE : int = True
elif "IPython" in sys.modules:
SCREAMING_SNAKE_CASE : str = """google.colab""" in str(sys.modules["""IPython"""].get_ipython() )
try:
SCREAMING_SNAKE_CASE : str = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
            f'''Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get("""TPU_NAME""" , lowerCamelCase_ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside """
"""your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if num_processes is None:
SCREAMING_SNAKE_CASE : Optional[int] = 8
SCREAMING_SNAKE_CASE : List[Any] = PrepareForLaunch(lowerCamelCase_ , distributed_type="""TPU""" )
print(f'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method="""fork""" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on one CPU.""" )
function(*lowerCamelCase_ )
else:
if num_processes is None:
raise ValueError(
"""You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """
"""inside your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if torch.cuda.is_initialized():
raise ValueError(
"""To launch a multi-GPU training from your notebook, you need to avoid running any instruction """
"""using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """
"""function.""" )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
            world_size=lowerCamelCase_ , master_addr="""127.0.0.1""" , master_port=lowerCamelCase_ , mixed_precision=lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = PrepareForLaunch(lowerCamelCase_ , distributed_type="""MULTI_GPU""" )
print(f'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method="""fork""" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"""CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """
"""This likely stems from an outside import causing issues once the `notebook_launcher()` is called. """
"""Please review your imports and test them when running the `notebook_launcher()` to identify """
"""which one is problematic.""" ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
SCREAMING_SNAKE_CASE : Any = """1"""
print("""Launching training on MPS.""" )
elif torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on CPU.""" )
function(*lowerCamelCase_ )
def __A ( lowerCamelCase_ , lowerCamelCase_=() , lowerCamelCase_=2 ):
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
            world_size=lowerCamelCase_ , master_addr="""127.0.0.1""" , master_port="""29500""" , accelerate_mixed_precision="""no""" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="""yes""" , ):
SCREAMING_SNAKE_CASE : Any = PrepareForLaunch(lowerCamelCase_ , debug=lowerCamelCase_ )
start_processes(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method="""fork""" )
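# Hedged usage sketch for the notebook launcher defined first above (in the
# unobfuscated accelerate source it is `notebook_launcher`; the training function
# is an illustrative placeholder and must create its Accelerator *inside* itself):
#
#   def train_fn(mixed_precision):
#       ...  # build Accelerator() here, after launching
#
#   notebook_launcher(train_fn, args=("fp16",), num_processes=2)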
| 323 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = 3_84
SCREAMING_SNAKE_CASE : Union[str, Any] = 7
if "tiny" in model_name:
SCREAMING_SNAKE_CASE : List[str] = 96
SCREAMING_SNAKE_CASE : List[str] = (2, 2, 6, 2)
SCREAMING_SNAKE_CASE : List[Any] = (3, 6, 12, 24)
elif "small" in model_name:
SCREAMING_SNAKE_CASE : Any = 96
SCREAMING_SNAKE_CASE : List[str] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : int = (3, 6, 12, 24)
elif "base" in model_name:
SCREAMING_SNAKE_CASE : int = 1_28
SCREAMING_SNAKE_CASE : Any = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : int = (4, 8, 16, 32)
SCREAMING_SNAKE_CASE : Optional[Any] = 12
SCREAMING_SNAKE_CASE : str = 5_12
elif "large" in model_name:
SCREAMING_SNAKE_CASE : Tuple = 1_92
SCREAMING_SNAKE_CASE : Tuple = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : List[str] = (6, 12, 24, 48)
SCREAMING_SNAKE_CASE : Tuple = 12
SCREAMING_SNAKE_CASE : Union[str, Any] = 7_68
# set label information
SCREAMING_SNAKE_CASE : List[str] = 1_50
SCREAMING_SNAKE_CASE : Optional[Any] = """huggingface/label-files"""
SCREAMING_SNAKE_CASE : List[str] = """ade20k-id2label.json"""
SCREAMING_SNAKE_CASE : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE : str = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Optional[Any] = SwinConfig(
embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , num_heads=lowerCamelCase_ , window_size=lowerCamelCase_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
SCREAMING_SNAKE_CASE : List[str] = UperNetConfig(
backbone_config=lowerCamelCase_ , auxiliary_in_channels=lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , )
return config
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = dct.pop(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = val
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
SCREAMING_SNAKE_CASE : Dict = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
            SCREAMING_SNAKE_CASE : int = in_proj_weight[:dim, :]
            SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[:dim]
            SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[dim : dim * 2, :]
            SCREAMING_SNAKE_CASE : Any = in_proj_bias[dim : dim * 2]
            SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[-dim:, :]
            SCREAMING_SNAKE_CASE : str = in_proj_bias[-dim:]
# fmt: on
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = x.shape
SCREAMING_SNAKE_CASE : Any = x.reshape(lowerCamelCase_ , 4 , in_channel // 4 )
SCREAMING_SNAKE_CASE : Any = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = x.shape
SCREAMING_SNAKE_CASE : Dict = x.reshape(lowerCamelCase_ , in_channel // 4 , 4 )
SCREAMING_SNAKE_CASE : str = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = x.shape[0]
SCREAMING_SNAKE_CASE : List[str] = x.reshape(4 , in_channel // 4 )
SCREAMING_SNAKE_CASE : str = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = x.shape[0]
SCREAMING_SNAKE_CASE : Optional[int] = x.reshape(in_channel // 4 , 4 )
SCREAMING_SNAKE_CASE : str = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowerCamelCase_ )
return x
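# Hedged worked example of the [0, 2, 1, 3] reshuffle used by the helpers above
# (toy size): rows of a patch-merging weight are regrouped into four blocks and
# blocks 1 and 2 are swapped, matching the third helper.
#
#   import torch
#   x = torch.arange(8.0)                                     # in_channel = 8
#   y = x.reshape(4, 2)[[0, 2, 1, 3], :].transpose(0, 1).reshape(8)
#   # tensor([0., 4., 2., 6., 1., 5., 3., 7.])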
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
SCREAMING_SNAKE_CASE : List[str] = model_name_to_url[model_name]
SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="""cpu""" , file_name=lowerCamelCase_ )[
"""state_dict"""
]
for name, param in state_dict.items():
print(lowerCamelCase_ , param.shape )
SCREAMING_SNAKE_CASE : Dict = get_upernet_config(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetForSemanticSegmentation(lowerCamelCase_ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(lowerCamelCase_ )
if "bn" in key:
SCREAMING_SNAKE_CASE : List[str] = key.replace("""bn""" , """batch_norm""" )
SCREAMING_SNAKE_CASE : Optional[Any] = val
# rename keys
SCREAMING_SNAKE_CASE : Union[str, Any] = create_rename_keys(lowerCamelCase_ )
for src, dest in rename_keys:
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
read_in_q_k_v(lowerCamelCase_ , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
SCREAMING_SNAKE_CASE : Tuple = reverse_correct_unfold_reduction_order(lowerCamelCase_ )
if "norm" in key:
SCREAMING_SNAKE_CASE : Optional[int] = reverse_correct_unfold_norm_order(lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
# verify on image
SCREAMING_SNAKE_CASE : Optional[int] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("""RGB""" )
SCREAMING_SNAKE_CASE : Optional[int] = SegformerImageProcessor()
SCREAMING_SNAKE_CASE : str = processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = outputs.logits
print(logits.shape )
print("""First values of logits:""" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] )
elif model_name == "upernet-swin-small":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] )
elif model_name == "upernet-swin-base":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] )
elif model_name == "upernet-swin-large":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase_ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowerCamelCase_ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCAmelCase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 323 | 1 |
'''simple docstring'''
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int=None , lowerCamelCase_ : Tuple=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = data
SCREAMING_SNAKE_CASE : int = previous
SCREAMING_SNAKE_CASE : Optional[int] = next_node
def __str__( self : Optional[Any] ):
'''simple docstring'''
return f'''{self.data}'''
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return self.data
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
return self.next
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return self.previous
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = head
def __iter__( self : Any ):
'''simple docstring'''
return self
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
if not self.current:
raise StopIteration
else:
SCREAMING_SNAKE_CASE : Any = self.current.get_data()
SCREAMING_SNAKE_CASE : Optional[Any] = self.current.get_next()
return value
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = None # First node in list
SCREAMING_SNAKE_CASE : Tuple = None # Last node in list
def __str__( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.head
SCREAMING_SNAKE_CASE : str = []
while current is not None:
nodes.append(current.get_data() )
SCREAMING_SNAKE_CASE : List[str] = current.get_next()
return " ".join(str(lowerCamelCase_ ) for node in nodes )
def __contains__( self : Dict , lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.head
while current:
if current.get_data() == value:
return True
SCREAMING_SNAKE_CASE : Optional[int] = current.get_next()
return False
def __iter__( self : Dict ):
'''simple docstring'''
return LinkedListIterator(self.head )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
if self.head:
return self.head.get_data()
return None
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
if self.tail:
return self.tail.get_data()
return None
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Node ):
'''simple docstring'''
if self.head is None:
SCREAMING_SNAKE_CASE : Tuple = node
SCREAMING_SNAKE_CASE : Optional[int] = node
else:
self.insert_before_node(self.head , lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Node ):
'''simple docstring'''
if self.head is None:
self.set_head(lowerCamelCase_ )
else:
self.insert_after_node(self.tail , lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = Node(lowerCamelCase_ )
if self.head is None:
self.set_head(lowerCamelCase_ )
else:
self.set_tail(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Node , lowerCamelCase_ : Node ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = node
SCREAMING_SNAKE_CASE : str = node.previous
if node.get_previous() is None:
SCREAMING_SNAKE_CASE : Tuple = node_to_insert
else:
SCREAMING_SNAKE_CASE : int = node_to_insert
SCREAMING_SNAKE_CASE : int = node_to_insert
def lowerCamelCase_ ( self : int , lowerCamelCase_ : Node , lowerCamelCase_ : Node ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = node
SCREAMING_SNAKE_CASE : Any = node.next
if node.get_next() is None:
SCREAMING_SNAKE_CASE : Tuple = node_to_insert
else:
SCREAMING_SNAKE_CASE : Optional[Any] = node_to_insert
SCREAMING_SNAKE_CASE : Dict = node_to_insert
def lowerCamelCase_ ( self : str , lowerCamelCase_ : int , lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 1
SCREAMING_SNAKE_CASE : Tuple = Node(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.head
while node:
if current_position == position:
self.insert_before_node(lowerCamelCase_ , lowerCamelCase_ )
return
current_position += 1
SCREAMING_SNAKE_CASE : int = node.next
self.insert_after_node(self.tail , lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.head
while node:
if node.get_data() == item:
return node
SCREAMING_SNAKE_CASE : int = node.get_next()
raise Exception("""Node not found""" )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[str] ):
'''simple docstring'''
if (node := self.get_node(lowerCamelCase_ )) is not None:
if node == self.head:
SCREAMING_SNAKE_CASE : Any = self.head.get_next()
if node == self.tail:
SCREAMING_SNAKE_CASE : Tuple = self.tail.get_previous()
self.remove_node_pointers(lowerCamelCase_ )
@staticmethod
def lowerCamelCase_ ( lowerCamelCase_ : Node ):
'''simple docstring'''
if node.get_next():
SCREAMING_SNAKE_CASE : Optional[int] = node.previous
if node.get_previous():
SCREAMING_SNAKE_CASE : List[Any] = node.next
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : List[Any] = None
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.head is None
def __A ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
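# The class above is an obfuscated doubly linked list; a hedged minimal sketch of
# the same insert-before pointer surgery, with readable names:
#
#   class Node:
#       def __init__(self, data):
#           self.data, self.prev, self.next = data, None, None
#
#   def insert_before(node, new):     # splice `new` in front of `node`
#       new.next, new.prev = node, node.prev
#       if node.prev is not None:
#           node.prev.next = new
#       node.prev = new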
| 323 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class UpperCamelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Dict[str, int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int = None , lowerCamelCase_ : int = None ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Any = pad_token_id
SCREAMING_SNAKE_CASE : List[Any] = max_length
SCREAMING_SNAKE_CASE : Optional[int] = vocab
SCREAMING_SNAKE_CASE : List[Any] = merges
SCREAMING_SNAKE_CASE : Tuple = BytePairTokenizer(lowerCamelCase_ , lowerCamelCase_ , sequence_length=lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : Any , lowerCamelCase_ : GPTaTokenizer , *lowerCamelCase_ : str , **lowerCamelCase_ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = [""" """.join(lowerCamelCase_ ) for m in tokenizer.bpe_ranks.keys()]
SCREAMING_SNAKE_CASE : List[str] = tokenizer.get_vocab()
return cls(lowerCamelCase_ , lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : List[Any] , lowerCamelCase_ : Union[str, os.PathLike] , *lowerCamelCase_ : str , **lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = GPTaTokenizer.from_pretrained(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
return cls.from_tokenizer(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : List[str] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
return cls(**lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : int = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tf_tokenizer(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = tf.ones_like(lowerCamelCase_ )
if self.pad_token_id is not None:
# pad the tokens up to max length
SCREAMING_SNAKE_CASE : Optional[int] = max_length if max_length is not None else self.max_length
if max_length is not None:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = pad_model_inputs(
lowerCamelCase_ , max_seq_length=lowerCamelCase_ , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 323 | 1 |
'''simple docstring'''
import math
def __A ( lowerCamelCase_ = 1_00 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = sum(i * i for i in range(1 , n + 1 ) )
SCREAMING_SNAKE_CASE : Tuple = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
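# Hedged cross-check via the closed form n(n - 1)(n + 1)(3n + 2) / 12, which
# follows from the standard sum and sum-of-squares formulas:
#
#   assert __A(10) == 10 * 9 * 11 * 32 // 12   # 2640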
if __name__ == "__main__":
print(f'''{solution() = }''')
| 323 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase_ : Optional[NestedDataStructureLike[PathLike]] = None , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Optional[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = path_or_paths
SCREAMING_SNAKE_CASE : List[Any] = split if split or isinstance(lowerCamelCase_ , lowerCamelCase_ ) else """train"""
SCREAMING_SNAKE_CASE : List[Any] = features
SCREAMING_SNAKE_CASE : Union[str, Any] = cache_dir
SCREAMING_SNAKE_CASE : Dict = keep_in_memory
SCREAMING_SNAKE_CASE : Union[str, Any] = streaming
SCREAMING_SNAKE_CASE : Optional[int] = num_proc
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs
@abstractmethod
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
pass
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : str , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Tuple , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = features
SCREAMING_SNAKE_CASE : int = cache_dir
SCREAMING_SNAKE_CASE : Dict = keep_in_memory
SCREAMING_SNAKE_CASE : Tuple = streaming
SCREAMING_SNAKE_CASE : Union[str, Any] = num_proc
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs
@abstractmethod
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
pass
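# A minimal sketch of a concrete reader built on the abstract contract above
# (construct with a path, implement read()). The JSON-lines format here is an
# illustrative assumption, not the library's actual reader.
import json

class ToyJsonLinesReader:
    def __init__(self, path):
        self.path = path

    def read(self):
        with open(self.path, encoding="utf-8") as f:
            return [json.loads(line) for line in f if line.strip()]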
| 323 | 1 |
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = """"""
SCREAMING_SNAKE_CASE : Optional[Any] = """"""
SCREAMING_SNAKE_CASE : int = []
SCREAMING_SNAKE_CASE : List[str] = 0
SCREAMING_SNAKE_CASE : Optional[Any] = 2_56
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : Tuple = 0
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = cva.imread(lowerCamelCase_ , 0 )
SCREAMING_SNAKE_CASE : List[Any] = copy.deepcopy(self.img )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label="""x""" )
SCREAMING_SNAKE_CASE : Optional[Any] = np.sum(lowerCamelCase_ )
for i in range(len(lowerCamelCase_ ) ):
SCREAMING_SNAKE_CASE : str = x[i] / self.k
self.sk += prk
SCREAMING_SNAKE_CASE : Any = (self.L - 1) * self.sk
SCREAMING_SNAKE_CASE : Dict = last % 1  # fractional part of `last`; the original `int(last % last )` was always 0, so the round-half-up below never fired
SCREAMING_SNAKE_CASE : Optional[Any] = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = int(np.ma.count(self.img ) / self.img[1].size )
SCREAMING_SNAKE_CASE : Optional[Any] = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
SCREAMING_SNAKE_CASE : str = self.img[j][i]
if num != self.last_list[num]:
SCREAMING_SNAKE_CASE : Optional[int] = self.last_list[num]
cva.imwrite("""output_data/output.jpg""" , self.img )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
plt.hist(self.img.ravel() , 2_56 , [0, 2_56] )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
cva.imshow("""Output-Image""" , self.img )
cva.imshow("""Input-Image""" , self.original_image )
cva.waitKey(50_00 )
cva.destroyAllWindows()
if __name__ == "__main__":
__UpperCAmelCase = os.path.join(os.path.dirname(__file__), """image_data/input.jpg""")  # dirname, not basename, so the input image resolves next to this script
__UpperCAmelCase = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
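# A vectorized sketch of the same stretch/equalization idea using a NumPy CDF
# lookup table; it runs on a synthetic array so no input image file is needed
# (the 8-bit assumption matches L = 2_56 above).
import numpy as np

def equalize_histogram(img, levels=2_56):
    hist, _ = np.histogram(img.ravel(), bins=levels, range=(0, levels))
    cdf = hist.cumsum() / img.size             # running probability s_k
    lut = np.round((levels - 1) * cdf).astype(np.uint8)
    return lut[img]                            # remap every pixel through the LUT

demo = np.random.randint(60, 1_20, size=(4, 4), dtype=np.uint8)
print(equalize_histogram(demo))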
| 323 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = """ylacombe/bark-small"""
SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : str = """en_speaker_1"""
SCREAMING_SNAKE_CASE : Optional[int] = """This is a test string"""
SCREAMING_SNAKE_CASE : Optional[int] = """speaker_embeddings_path.json"""
SCREAMING_SNAKE_CASE : List[Any] = """speaker_embeddings"""
def lowerCamelCase_ ( self : int , **lowerCamelCase_ : int ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[str] = BarkProcessor(tokenizer=lowerCamelCase_ )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
SCREAMING_SNAKE_CASE : int = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
SCREAMING_SNAKE_CASE : List[str] = 35
SCREAMING_SNAKE_CASE : List[Any] = 2
SCREAMING_SNAKE_CASE : int = 8
SCREAMING_SNAKE_CASE : Optional[int] = {
"""semantic_prompt""": np.ones(lowerCamelCase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
SCREAMING_SNAKE_CASE : Tuple = processor(text=self.input_string , voice_preset=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = processor(text=self.input_string , voice_preset=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
SCREAMING_SNAKE_CASE : Optional[Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Any = BarkProcessor(tokenizer=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string )
SCREAMING_SNAKE_CASE : Tuple = tokenizer(
self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
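# A minimal sketch of the .npz round trip the test above relies on; the prompt
# array shapes (35, (2, 35), (8, 35)) mirror the test values and are otherwise
# illustrative assumptions.
import os
import tempfile

import numpy as np

preset = {
    "semantic_prompt": np.ones(35),
    "coarse_prompt": np.ones((2, 35)),
    "fine_prompt": np.ones((8, 35)),
}
path = os.path.join(tempfile.mkdtemp(), "file.npz")
np.savez(path, **preset)
loaded = np.load(path)
assert all(np.array_equal(preset[k], loaded[k]) for k in preset)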
| 323 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = tempfile.mkdtemp()
# fmt: off
SCREAMING_SNAKE_CASE : List[str] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
SCREAMING_SNAKE_CASE : int = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
SCREAMING_SNAKE_CASE : Dict = {"""unk_token""": """<unk>"""}
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname , lowerCamelCase_ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] , **lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def lowerCamelCase_ ( self : str , **lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] , **lowerCamelCase_ : Any ):
'''simple docstring'''
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE : Any = [Image.fromarray(np.moveaxis(lowerCamelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Optional[int] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE : List[str] = self.get_image_processor()
SCREAMING_SNAKE_CASE : Optional[int] = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : Optional[Any] = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : Optional[Any] = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCamelCase_ )
self.assertIsInstance(processor_fast.tokenizer , lowerCamelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCamelCase_ )
self.assertIsInstance(processor_fast.image_processor , lowerCamelCase_ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
SCREAMING_SNAKE_CASE : List[str] = self.get_image_processor(do_normalize=lowerCamelCase_ , padding_value=1.0 )
SCREAMING_SNAKE_CASE : List[Any] = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCamelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.get_image_processor()
SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : str = image_processor(lowerCamelCase_ , return_tensors="""np""" )
SCREAMING_SNAKE_CASE : int = processor(images=lowerCamelCase_ , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.get_image_processor()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Optional[Any] = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = """lower newer"""
SCREAMING_SNAKE_CASE : int = processor(text=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(lowerCamelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_image_processor()
SCREAMING_SNAKE_CASE : Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[Any] = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = """lower newer"""
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : str = processor(text=lowerCamelCase_ , images=lowerCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase_ ):
processor()
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.get_image_processor()
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[str] = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE : int = processor.batch_decode(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = tokenizer.batch_decode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.get_image_processor()
SCREAMING_SNAKE_CASE : Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE : int = CLIPProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = """lower newer"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : Optional[Any] = processor(text=lowerCamelCase_ , images=lowerCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
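# A minimal sketch of the processor pattern these tests exercise: one object
# that routes text to the tokenizer, images to the image processor, and merges
# the outputs; the toy callables below are illustrative assumptions.
class ToyProcessor:
    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images.")
        outputs = {}
        if text is not None:
            outputs.update(self.tokenizer(text))
        if images is not None:
            outputs.update(self.image_processor(images))
        return outputs

toy = ToyProcessor(lambda t: {"input_ids": [len(w) for w in t.split()]},
                   lambda i: {"pixel_values": i})
print(toy(text="lower newer", images=[[0.0]]))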
| 323 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCAmelCase = logging.getLogger(__name__)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return (preds == labels).mean()
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def __A ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase_ )
# Set seed
set_seed(training_args.seed )
try:
SCREAMING_SNAKE_CASE : Dict = processors[data_args.task_name]()
SCREAMING_SNAKE_CASE : Optional[int] = processor.get_labels()
SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , )
# Get datasets
SCREAMING_SNAKE_CASE : Optional[Any] = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
SCREAMING_SNAKE_CASE : Dict = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(lowerCamelCase_ ) -> Dict:
SCREAMING_SNAKE_CASE : str = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(lowerCamelCase_ , p.label_ids )}
# Data collator
SCREAMING_SNAKE_CASE : List[Any] = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
SCREAMING_SNAKE_CASE : Any = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , compute_metrics=lowerCamelCase_ , data_collator=lowerCamelCase_ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE : Optional[Any] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
SCREAMING_SNAKE_CASE : Optional[Any] = trainer.evaluate()
SCREAMING_SNAKE_CASE : str = os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_master():
with open(lowerCamelCase_ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , lowerCamelCase_ , lowerCamelCase_ )
writer.write("""%s = %s\n""" % (key, value) )
results.update(lowerCamelCase_ )
return results
def __A ( lowerCamelCase_ ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
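# A minimal sketch of the metric used above: argmax over the per-choice logits,
# then mean agreement with the gold labels (what simple_accuracy computes).
import numpy as np

logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
labels = np.array([1, 0, 0])
preds = np.argmax(logits, axis=1)
print({"acc": (preds == labels).mean()})  # 2 of 3 correct -> ~0.667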
| 323 | 1 |
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def __A ( lowerCamelCase_ ):
"""simple docstring"""
if not sentence:
return ""
SCREAMING_SNAKE_CASE : Any = dict(zip(lowerCamelCase_ , lowerCamelCase_ ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
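# Quick usage sketch: unlike str.capitalize(), the helper above upper-cases
# only the first character and leaves the rest of the sentence untouched.
from string import ascii_lowercase, ascii_uppercase

lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
sentence = "hello World"
print(lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:])  # Hello World
print(sentence.capitalize())                                        # Hello world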
| 323 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Any=("DownEncoderBlock2D",) , lowerCamelCase_ : List[Any]=(64,) , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : List[Any]="silu" , lowerCamelCase_ : Optional[int]=True , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[int] = layers_per_block
SCREAMING_SNAKE_CASE : int = torch.nn.Convad(
lowerCamelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList([] )
# down
SCREAMING_SNAKE_CASE : Tuple = block_out_channels[0]
for i, down_block_type in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Any = output_channel
SCREAMING_SNAKE_CASE : List[str] = block_out_channels[i]
SCREAMING_SNAKE_CASE : Union[str, Any] = i == len(lowerCamelCase_ ) - 1
SCREAMING_SNAKE_CASE : Optional[Any] = get_down_block(
lowerCamelCase_ , num_layers=self.layers_per_block , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
self.down_blocks.append(lowerCamelCase_ )
# mid
SCREAMING_SNAKE_CASE : Union[str, Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# out
SCREAMING_SNAKE_CASE : List[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCamelCase_ , eps=1e-6 )
SCREAMING_SNAKE_CASE : List[Any] = nn.SiLU()
SCREAMING_SNAKE_CASE : Dict = 2 * out_channels if double_z else out_channels
SCREAMING_SNAKE_CASE : List[Any] = nn.Convad(block_out_channels[-1] , lowerCamelCase_ , 3 , padding=1 )
SCREAMING_SNAKE_CASE : Tuple = False
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = x
SCREAMING_SNAKE_CASE : int = self.conv_in(lowerCamelCase_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase_ : List[Any] ):
def custom_forward(*lowerCamelCase_ : List[str] ):
return module(*lowerCamelCase_ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
else:
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCamelCase_ )
else:
# down
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : Tuple = down_block(lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : List[Any] = self.mid_block(lowerCamelCase_ )
# post-process
SCREAMING_SNAKE_CASE : Optional[Any] = self.conv_norm_out(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.conv_act(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.conv_out(lowerCamelCase_ )
return sample
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : str=("UpDecoderBlock2D",) , lowerCamelCase_ : Union[str, Any]=(64,) , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : Dict="silu" , lowerCamelCase_ : Any="group" , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : int = layers_per_block
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Convad(
lowerCamelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Any = nn.ModuleList([] )
SCREAMING_SNAKE_CASE : str = in_channels if norm_type == """spatial""" else None
# mid
SCREAMING_SNAKE_CASE : Dict = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# up
SCREAMING_SNAKE_CASE : Union[str, Any] = list(reversed(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Any = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = output_channel
SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i]
SCREAMING_SNAKE_CASE : List[str] = i == len(lowerCamelCase_ ) - 1
SCREAMING_SNAKE_CASE : List[Any] = get_up_block(
lowerCamelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , prev_output_channel=lowerCamelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , resnet_time_scale_shift=lowerCamelCase_ , )
self.up_blocks.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = output_channel
# out
if norm_type == "spatial":
SCREAMING_SNAKE_CASE : List[Any] = SpatialNorm(block_out_channels[0] , lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCamelCase_ , eps=1e-6 )
SCREAMING_SNAKE_CASE : Dict = nn.SiLU()
SCREAMING_SNAKE_CASE : str = nn.Convad(block_out_channels[0] , lowerCamelCase_ , 3 , padding=1 )
SCREAMING_SNAKE_CASE : Dict = False
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : str=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = z
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_in(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase_ : List[str] ):
def custom_forward(*lowerCamelCase_ : str ):
return module(*lowerCamelCase_ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
SCREAMING_SNAKE_CASE : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
else:
# middle
SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ )
else:
# middle
SCREAMING_SNAKE_CASE : Any = self.mid_block(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Any = up_block(lowerCamelCase_ , lowerCamelCase_ )
# post-process
if latent_embeds is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_norm_out(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_act(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.conv_out(lowerCamelCase_ )
return sample
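# A minimal sketch of the gradient-checkpointing wrapper used in both blocks
# above: wrap a module call so activations are recomputed during backward
# instead of stored (use_reentrant matches the >= 1.11 branch).
import torch
from torch.utils.checkpoint import checkpoint

def create_custom_forward(module):
    def custom_forward(*inputs):
        return module(*inputs)
    return custom_forward

layer = torch.nn.Linear(8, 8)
x = torch.randn(2, 8, requires_grad=True)
y = checkpoint(create_custom_forward(layer), x, use_reentrant=False)
y.sum().backward()
print(x.grad.shape)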
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int=None , lowerCamelCase_ : Any="random" , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[Any]=True ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Tuple = n_e
SCREAMING_SNAKE_CASE : int = vq_embed_dim
SCREAMING_SNAKE_CASE : Tuple = beta
SCREAMING_SNAKE_CASE : Union[str, Any] = legacy
SCREAMING_SNAKE_CASE : int = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
SCREAMING_SNAKE_CASE : Optional[Any] = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
SCREAMING_SNAKE_CASE : Tuple = self.used.shape[0]
SCREAMING_SNAKE_CASE : Any = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
SCREAMING_SNAKE_CASE : Union[str, Any] = self.re_embed
SCREAMING_SNAKE_CASE : Any = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
SCREAMING_SNAKE_CASE : Optional[int] = n_e
SCREAMING_SNAKE_CASE : Any = sane_index_shape
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = inds.shape
assert len(lowerCamelCase_ ) > 1
SCREAMING_SNAKE_CASE : Tuple = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long()
SCREAMING_SNAKE_CASE : Union[str, Any] = match.argmax(-1 )
SCREAMING_SNAKE_CASE : Tuple = match.sum(2 ) < 1
if self.unknown_index == "random":
SCREAMING_SNAKE_CASE : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
SCREAMING_SNAKE_CASE : Any = self.unknown_index
return new.reshape(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = inds.shape
assert len(lowerCamelCase_ ) > 1
SCREAMING_SNAKE_CASE : str = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ )
if self.re_embed > self.used.shape[0]: # extra token
SCREAMING_SNAKE_CASE : List[Any] = 0 # simply set to zero
SCREAMING_SNAKE_CASE : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCamelCase_ )
return back.reshape(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous()
SCREAMING_SNAKE_CASE : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
SCREAMING_SNAKE_CASE : Any = torch.argmin(torch.cdist(lowerCamelCase_ , self.embedding.weight ) , dim=1 )
SCREAMING_SNAKE_CASE : Tuple = self.embedding(lowerCamelCase_ ).view(z.shape )
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : List[str] = None
# compute loss for embedding
if not self.legacy:
SCREAMING_SNAKE_CASE : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
SCREAMING_SNAKE_CASE : Tuple = z + (z_q - z).detach()
# reshape back to match original input shape
SCREAMING_SNAKE_CASE : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE : List[Any] = self.remap_to_used(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
if self.remap is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE : List[Any] = self.unmap_to_all(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
SCREAMING_SNAKE_CASE : str = self.embedding(lowerCamelCase_ )
if shape is not None:
SCREAMING_SNAKE_CASE : List[str] = z_q.view(lowerCamelCase_ )
# reshape back to match original input shape
SCREAMING_SNAKE_CASE : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
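# A minimal sketch of the nearest-codebook lookup at the heart of the vector
# quantizer above: flatten the latents, take the argmin over pairwise
# distances, and re-embed through the codebook.
import torch

codebook = torch.randn(16, 4)              # n_e = 16 codes of dim 4
z = torch.randn(5, 4)                      # 5 flattened latents
indices = torch.argmin(torch.cdist(z, codebook), dim=1)
z_q = codebook[indices]
print(indices.shape, z_q.shape)            # torch.Size([5]) torch.Size([5, 4])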
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int]=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parameters
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = torch.chunk(lowerCamelCase_ , 2 , dim=1 )
SCREAMING_SNAKE_CASE : List[str] = torch.clamp(self.logvar , -30.0 , 20.0 )
SCREAMING_SNAKE_CASE : Dict = deterministic
SCREAMING_SNAKE_CASE : int = torch.exp(0.5 * self.logvar )
SCREAMING_SNAKE_CASE : Tuple = torch.exp(self.logvar )
if self.deterministic:
SCREAMING_SNAKE_CASE : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[torch.Generator] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = randn_tensor(
self.mean.shape , generator=lowerCamelCase_ , device=self.parameters.device , dtype=self.parameters.dtype )
SCREAMING_SNAKE_CASE : Optional[Any] = self.mean + self.std * sample
return x
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int=None ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=[1, 2, 3] ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
SCREAMING_SNAKE_CASE : List[Any] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.mean
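# A minimal sketch of the reparameterization trick the distribution class above
# implements: sample = mean + std * eps keeps sampling differentiable with
# respect to the predicted mean and log-variance.
import torch

mean = torch.zeros(2, 4, requires_grad=True)
logvar = torch.zeros(2, 4, requires_grad=True)
std = torch.exp(0.5 * logvar)
sample = mean + std * torch.randn_like(std)
sample.sum().backward()  # gradients reach both mean and logvar
print(mean.grad.shape, logvar.grad.shape)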
| 323 | 1 |
'''simple docstring'''
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = current_set.copy()
for row_index, row in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Dict = row[0]
for column_index, column in enumerate(lowerCamelCase_ ):
if magnitude == 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = column
continue
SCREAMING_SNAKE_CASE : Optional[int] = column / magnitude
# Subtract to cancel term
SCREAMING_SNAKE_CASE : Optional[Any] = current_set[0]
SCREAMING_SNAKE_CASE : Tuple = [first_row]
SCREAMING_SNAKE_CASE : List[str] = current_set[1::]
for row in current_set:
SCREAMING_SNAKE_CASE : str = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(lowerCamelCase_ )
continue
for column_index in range(len(lowerCamelCase_ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowerCamelCase_ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
SCREAMING_SNAKE_CASE : int = final_set[0]
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : List[str] = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
SCREAMING_SNAKE_CASE : List[Any] = simplify(lowerCamelCase_ )
for i in range(len(lowerCamelCase_ ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = resultant
return final_set
def __A ( lowerCamelCase_ ):
"""simple docstring"""
if len(lowerCamelCase_ ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
SCREAMING_SNAKE_CASE : List[Any] = len(lowerCamelCase_ ) + 1
if any(len(lowerCamelCase_ ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(lowerCamelCase_ , (int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(lowerCamelCase_ ) == 1:
return [equations[0][-1] / equations[0][0]]
SCREAMING_SNAKE_CASE : str = equations.copy()
if any(0 in row for row in data_set ):
SCREAMING_SNAKE_CASE : Dict = data_set.copy()
SCREAMING_SNAKE_CASE : List[str] = []
for row_index, row in enumerate(lowerCamelCase_ ):
if 0 not in row:
SCREAMING_SNAKE_CASE : int = data_set.pop(lowerCamelCase_ )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = data_set.copy()
SCREAMING_SNAKE_CASE : str = simplify(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = simplified[::-1]
SCREAMING_SNAKE_CASE : list = []
for row in simplified:
SCREAMING_SNAKE_CASE : List[str] = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
SCREAMING_SNAKE_CASE : str = row.copy()[: len(lowerCamelCase_ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowerCamelCase_ ) == 0:
solutions.append(0 )
continue
SCREAMING_SNAKE_CASE : Union[str, Any] = temp_row[1::]
SCREAMING_SNAKE_CASE : Optional[Any] = temp_row[::-1]
for column_index, column in enumerate(lowerCamelCase_ ):
current_solution -= column * solutions[column_index]
solutions.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = []
for item in solutions:
final.append(float(round(lowerCamelCase_ , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
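# A cross-check sketch: split each row into coefficients and the constant term
# and solve the same system with NumPy's linear solver.
import numpy as np

eq_np = np.array(
    [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ],
    dtype=float,
)
print(np.linalg.solve(eq_np[:, :-1], eq_np[:, -1]))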
| 323 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaxAutoencoderKL
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : str = 3
SCREAMING_SNAKE_CASE : List[Any] = (32, 32)
SCREAMING_SNAKE_CASE : Tuple = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE : Any = jax.random.uniform(lowerCamelCase_ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_input
return init_dict, inputs_dict
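# A minimal sketch of building the dummy batch the property above returns;
# requires jax, and the shapes mirror the test's (batch, channels, 32, 32).
import jax

prng_key = jax.random.PRNGKey(0)
sample = jax.random.uniform(prng_key, (4, 3, 32, 32))
print(sample.shape, sample.dtype)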
| 323 | 1 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
__UpperCAmelCase = re.compile(r"""\s+""")
def __A ( lowerCamelCase_ ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(lowerCamelCase_ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = [len(lowerCamelCase_ ) for line in example["""content"""].splitlines()]
return {"line_mean": np.mean(lowerCamelCase_ ), "line_max": max(lowerCamelCase_ )}
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = np.mean([c.isalnum() for c in example["""content"""]] )
return {"alpha_frac": alpha_frac}
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
def __A ( lowerCamelCase_ , lowerCamelCase_=5 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = ["""auto-generated""", """autogenerated""", """automatically generated"""]
SCREAMING_SNAKE_CASE : List[Any] = example["""content"""].splitlines()
for _, line in zip(range(lowerCamelCase_ ) , lowerCamelCase_ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def __A ( lowerCamelCase_ , lowerCamelCase_=5 , lowerCamelCase_=0.05 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = ["""unit tests""", """test file""", """configuration file"""]
SCREAMING_SNAKE_CASE : Optional[int] = example["""content"""].splitlines()
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Optional[int] = 0
# first test
for _, line in zip(range(lowerCamelCase_ ) , lowerCamelCase_ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
SCREAMING_SNAKE_CASE : int = example["""content"""].count("""\n""" )
SCREAMING_SNAKE_CASE : List[Any] = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = ["""def """, """class """, """for """, """while """]
SCREAMING_SNAKE_CASE : Optional[int] = example["""content"""].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def __A ( lowerCamelCase_ , lowerCamelCase_=4 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = example["""content"""].splitlines()
SCREAMING_SNAKE_CASE : Dict = 0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = tokenizer(example["""content"""] , truncation=lowerCamelCase_ )["""input_ids"""]
SCREAMING_SNAKE_CASE : str = len(example["""content"""] ) / len(lowerCamelCase_ )
return {"ratio": ratio}
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = {}
results.update(get_hash(lowerCamelCase_ ) )
results.update(line_stats(lowerCamelCase_ ) )
results.update(alpha_stats(lowerCamelCase_ ) )
results.update(char_token_ratio(lowerCamelCase_ ) )
results.update(is_autogenerated(lowerCamelCase_ ) )
results.update(is_config_or_test(lowerCamelCase_ ) )
results.update(has_no_keywords(lowerCamelCase_ ) )
results.update(has_few_assignments(lowerCamelCase_ ) )
return results
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if not check_uniques(lowerCamelCase_ , lowerCamelCase_ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def __A ( lowerCamelCase_ ):
"""simple docstring"""
with open(lowerCamelCase_ , """rb""" ) as f_in:
with gzip.open(str(lowerCamelCase_ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out:
shutil.copyfileobj(lowerCamelCase_ , lowerCamelCase_ )
os.unlink(lowerCamelCase_ )
# Settings
__UpperCAmelCase = HfArgumentParser(PreprocessingArguments)
__UpperCAmelCase = parser.parse_args()
if args.num_workers is None:
__UpperCAmelCase = multiprocessing.cpu_count()
__UpperCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
__UpperCAmelCase = time.time()
__UpperCAmelCase = load_dataset(args.dataset_name, split="""train""")
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
__UpperCAmelCase = time.time()
__UpperCAmelCase = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
__UpperCAmelCase = set(ds.unique("""hash"""))
__UpperCAmelCase = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
__UpperCAmelCase = time.time()
__UpperCAmelCase = ds.filter(filter, fn_kwargs={"""uniques""": uniques, """args""": args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
__UpperCAmelCase = time.time()
__UpperCAmelCase , __UpperCAmelCase = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
__UpperCAmelCase = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / """duplicate_clusters.json""", """w""") as f:
json.dump(duplicate_clusters, f)
__UpperCAmelCase = output_dir / """data"""
data_dir.mkdir(exist_ok=True)
__UpperCAmelCase = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
__UpperCAmelCase = str(data_dir / f'''file-{file_number+1:012}.json''')
__UpperCAmelCase = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
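# A minimal sketch of the exact-deduplication step above: hash each sample's
# whitespace-stripped content with MD5 and keep only first occurrences.
import hashlib
import re

PATTERN = re.compile(r"\s+")

def content_hash(text):
    return hashlib.md5(re.sub(PATTERN, "", text).encode("utf-8")).hexdigest()

samples = ["def f():\n    return 1", "def f():  return 1", "def g(): pass"]
seen, unique = set(), []
for s in samples:
    h = content_hash(s)
    if h not in seen:
        seen.add(h)
        unique.append(s)
print(len(unique))  # 2: the first two differ only in whitespace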
| 323 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase__ :
"""simple docstring"""
@staticmethod
def lowerCamelCase_ ( *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
pass
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = DepthEstimationPipeline(model=lowerCamelCase_ , image_processor=lowerCamelCase_ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , lowerCamelCase_ )
import datasets
SCREAMING_SNAKE_CASE : List[str] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
SCREAMING_SNAKE_CASE : Any = depth_estimator(
[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
] )
self.assertEqual(
[
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
] , lowerCamelCase_ , )
@require_tf
@unittest.skip("""Depth estimation is not implemented in TF""" )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@slow
@require_torch
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = """Intel/dpt-large"""
SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline("""depth-estimation""" , model=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
SCREAMING_SNAKE_CASE : str = hashimage(outputs["""depth"""] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.662 )
@require_torch
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.skipTest("""There is not hf-internal-testing tiny model for either GLPN nor DPT""" )
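# A minimal usage sketch of the pipeline the slow test above exercises; the
# checkpoint matches the test, and running this downloads the model (network).
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
print(type(out["depth"]), out["predicted_depth"].shape)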
| 323 | 1 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaxAutoencoderKL
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : str = 3
SCREAMING_SNAKE_CASE : List[Any] = (32, 32)
SCREAMING_SNAKE_CASE : Tuple = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE : Any = jax.random.uniform(lowerCamelCase_ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_input
return init_dict, inputs_dict
| 323 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester ( unittest.TestCase ):  # named so the reference below resolves
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=13 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Dict=2_24 , lowerCamelCase_ : List[Any]=30 , lowerCamelCase_ : Union[str, Any]=4_00 , lowerCamelCase_ : str=True , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , lowerCamelCase_ : Union[str, Any]=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 18}
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : Any = num_channels
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : Dict = min_resolution
SCREAMING_SNAKE_CASE : List[str] = max_resolution
SCREAMING_SNAKE_CASE : str = do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size
SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE : List[Any] = image_mean
SCREAMING_SNAKE_CASE : str = image_std
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ViTImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = EfficientFormerImageProcessorTester(self )
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : List[str] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Tuple = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Any = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
| 323 | 1 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__UpperCAmelCase = """pt"""
elif is_tf_available():
__UpperCAmelCase = """tf"""
else:
__UpperCAmelCase = """jax"""
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ByTaTokenizer
SCREAMING_SNAKE_CASE__ = False
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : List[str] = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
def lowerCamelCase_ ( self : Dict , **lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any]=False , lowerCamelCase_ : List[str]=20 , lowerCamelCase_ : Optional[int]=5 ):
'''simple docstring'''
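        # Collect token ids whose single-token decode/encode round-trips cleanly, then assemble a matching (text, ids) pair.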
SCREAMING_SNAKE_CASE : Tuple = []
for i in range(len(lowerCamelCase_ ) ):
try:
SCREAMING_SNAKE_CASE : Any = tokenizer.decode([i] , clean_up_tokenization_spaces=lowerCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
        SCREAMING_SNAKE_CASE : List[str] = list(filter(lambda t : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , lowerCamelCase_ ) )
        SCREAMING_SNAKE_CASE : Tuple = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowerCamelCase_ ) , lowerCamelCase_ ) )
if max_length is not None and len(lowerCamelCase_ ) > max_length:
SCREAMING_SNAKE_CASE : Dict = toks[:max_length]
if min_length is not None and len(lowerCamelCase_ ) < min_length and len(lowerCamelCase_ ) > 0:
while len(lowerCamelCase_ ) < min_length:
SCREAMING_SNAKE_CASE : int = toks + toks
# toks_str = [t[1] for t in toks]
SCREAMING_SNAKE_CASE : int = [t[0] for t in toks]
# Ensure consistency
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.decode(lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ )
if " " not in output_txt and len(lowerCamelCase_ ) > 1:
SCREAMING_SNAKE_CASE : Tuple = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowerCamelCase_ )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowerCamelCase_ )
)
if with_prefix_space:
SCREAMING_SNAKE_CASE : str = """ """ + output_txt
SCREAMING_SNAKE_CASE : str = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
return output_txt, output_ids
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE : int = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE : Any = """Unicode €."""
SCREAMING_SNAKE_CASE : List[Any] = tokenizer(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = [88, 1_13, 1_08, 1_02, 1_14, 1_03, 1_04, 35, 2_29, 1_33, 1_75, 49, 1]
self.assertEqual(encoded["""input_ids"""] , lowerCamelCase_ )
# decoding
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , """Unicode €.</s>""" )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer("""e è é ê ë""" )
SCREAMING_SNAKE_CASE : Tuple = [1_04, 35, 1_98, 1_71, 35, 1_98, 1_72, 35, 1_98, 1_73, 35, 1_98, 1_74, 1]
self.assertEqual(encoded["""input_ids"""] , lowerCamelCase_ )
# decoding
SCREAMING_SNAKE_CASE : Tuple = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE : Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
SCREAMING_SNAKE_CASE : List[str] = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 1, 0]
# fmt: on
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
if FRAMEWORK != "jax":
SCREAMING_SNAKE_CASE : str = list(batch.input_ids.numpy()[0] )
else:
SCREAMING_SNAKE_CASE : Optional[int] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE : Optional[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
SCREAMING_SNAKE_CASE : int = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors=lowerCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , lowerCamelCase_ )
self.assertIn("""attention_mask""" , lowerCamelCase_ )
self.assertNotIn("""decoder_input_ids""" , lowerCamelCase_ )
self.assertNotIn("""decoder_attention_mask""" , lowerCamelCase_ )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE : int = [
"""Summary of the text.""",
"""Another summary.""",
]
SCREAMING_SNAKE_CASE : int = tokenizer(
text_target=lowerCamelCase_ , max_length=32 , padding="""max_length""" , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_ )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE : Dict = ["""A long paragraph for summarization. </s>"""]
SCREAMING_SNAKE_CASE : Dict = ["""Summary of the text. </s>"""]
# fmt: off
SCREAMING_SNAKE_CASE : Union[str, Any] = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 35, 1]
SCREAMING_SNAKE_CASE : int = [86, 1_20, 1_12, 1_12, 1_00, 1_17, 1_24, 35, 1_14, 1_05, 35, 1_19, 1_07, 1_04, 35, 1_19, 1_04, 1_23, 1_19, 49, 35, 1]
# fmt: on
SCREAMING_SNAKE_CASE : int = tokenizer(lowerCamelCase_ , text_target=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , batch["""input_ids"""][0] )
self.assertEqual(lowerCamelCase_ , batch["""labels"""][0] )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
SCREAMING_SNAKE_CASE : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Union[str, Any] = """ He is very happy, UNwant\u00E9d,running"""
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = after_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
shutil.rmtree(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE : Union[str, Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : List[str] = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = after_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.__class__.from_pretrained(lowerCamelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCamelCase_ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
SCREAMING_SNAKE_CASE : Union[str, Any] = json.load(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
SCREAMING_SNAKE_CASE : str = json.load(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = [f'''<extra_id_{i}>''' for i in range(1_25 )]
SCREAMING_SNAKE_CASE : List[str] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
SCREAMING_SNAKE_CASE : Union[str, Any] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(lowerCamelCase_ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_class.from_pretrained(
lowerCamelCase_ , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
SCREAMING_SNAKE_CASE : Tuple = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=lowerCamelCase_ )]
SCREAMING_SNAKE_CASE : Tuple = tokenizer_class.from_pretrained(
lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = tokenizer_class.from_pretrained(lowerCamelCase_ )
self.assertTrue(tokenizer.decode([2_55] ) == """""" )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.get_tokenizers(fast=lowerCamelCase_ , do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
SCREAMING_SNAKE_CASE : int = tokenizer.convert_tokens_to_string(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE : List[Any] = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
SCREAMING_SNAKE_CASE : List[str] = 0
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_ids_to_tokens(
lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
for attr in attributes_list:
setattr(lowerCamelCase_ , attr + """_id""" , lowerCamelCase_ )
self.assertEqual(getattr(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(getattr(lowerCamelCase_ , attr + """_id""" ) , lowerCamelCase_ )
setattr(lowerCamelCase_ , attr + """_id""" , lowerCamelCase_ )
self.assertEqual(getattr(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(getattr(lowerCamelCase_ , attr + """_id""" ) , lowerCamelCase_ )
setattr(lowerCamelCase_ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(lowerCamelCase_ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(lowerCamelCase_ , """additional_special_tokens_ids""" ) , [] )
setattr(lowerCamelCase_ , """additional_special_tokens_ids""" , [token_id_to_test_setters] )
self.assertListEqual(getattr(lowerCamelCase_ , """additional_special_tokens""" ) , [token_to_test_setters] )
self.assertListEqual(getattr(lowerCamelCase_ , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
| 323 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
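# Maps each submodule to its public symbols; consumed by _LazyModule to defer the actual imports.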
__UpperCAmelCase = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 323 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__UpperCAmelCase = logging.get_logger(__name__)
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ['''pixel_values''']
def __init__( self : Tuple , lowerCamelCase_ : bool = True , lowerCamelCase_ : Optional[Dict[str, int]] = None , lowerCamelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCamelCase_ : bool = True , lowerCamelCase_ : Dict[str, int] = None , lowerCamelCase_ : bool = True , lowerCamelCase_ : Union[int, float] = 1 / 2_55 , lowerCamelCase_ : bool = True , lowerCamelCase_ : Optional[Union[float, List[float]]] = None , lowerCamelCase_ : Optional[Union[float, List[float]]] = None , **lowerCamelCase_ : int , ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = size if size is not None else {"""shortest_edge""": 2_56}
SCREAMING_SNAKE_CASE : List[str] = get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
SCREAMING_SNAKE_CASE : List[str] = get_size_dict(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size
SCREAMING_SNAKE_CASE : List[Any] = resample
SCREAMING_SNAKE_CASE : Optional[int] = do_center_crop
SCREAMING_SNAKE_CASE : Dict = crop_size
SCREAMING_SNAKE_CASE : Optional[Any] = do_rescale
SCREAMING_SNAKE_CASE : Tuple = rescale_factor
SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : np.ndarray , lowerCamelCase_ : Dict[str, int] , lowerCamelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase_ : List[str] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = get_resize_output_image_size(lowerCamelCase_ , size=size["""shortest_edge"""] , default_to_square=lowerCamelCase_ )
return resize(lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : np.ndarray , lowerCamelCase_ : Dict[str, int] , lowerCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase_ : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = get_size_dict(lowerCamelCase_ )
return center_crop(lowerCamelCase_ , size=(size["""height"""], size["""width"""]) , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : np.ndarray , lowerCamelCase_ : float , lowerCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
return rescale(lowerCamelCase_ , scale=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : np.ndarray , lowerCamelCase_ : Union[float, List[float]] , lowerCamelCase_ : Union[float, List[float]] , lowerCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase_ : Optional[Any] , ):
'''simple docstring'''
return normalize(lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : ImageInput , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Dict[str, int] = None , lowerCamelCase_ : PILImageResampling = None , lowerCamelCase_ : bool = None , lowerCamelCase_ : Dict[str, int] = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[float] = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[Union[float, List[float]]] = None , lowerCamelCase_ : Optional[Union[float, List[float]]] = None , lowerCamelCase_ : Optional[Union[str, TensorType]] = None , lowerCamelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCamelCase_ : List[Any] , ):
'''simple docstring'''
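        # Resolve per-call overrides against the processor defaults before validating and applying the transforms.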
SCREAMING_SNAKE_CASE : Dict = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(lowerCamelCase_ , default_to_square=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : str = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE : Optional[Any] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE : List[str] = get_size_dict(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : List[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : Tuple = make_list_of_images(lowerCamelCase_ )
if not valid_images(lowerCamelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Union[str, Any] = [to_numpy_array(lowerCamelCase_ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Optional[int] = [self.resize(image=lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.center_crop(image=lowerCamelCase_ , size=lowerCamelCase_ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : List[str] = [self.rescale(image=lowerCamelCase_ , scale=lowerCamelCase_ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : int = [self.normalize(image=lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ ) for image in images]
SCREAMING_SNAKE_CASE : List[str] = [to_channel_dimension_format(lowerCamelCase_ , lowerCamelCase_ ) for image in images]
SCREAMING_SNAKE_CASE : List[str] = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase_ , tensor_type=lowerCamelCase_ )
| 323 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 323 | 1 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
__UpperCAmelCase = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = torch.load(lowerCamelCase_ , map_location="""cpu""" )
return sd
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=rename_keys_prefix ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = OrderedDict()
SCREAMING_SNAKE_CASE : List[Any] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
SCREAMING_SNAKE_CASE : Union[str, Any] = key
for name_pair in rename_keys_prefix:
SCREAMING_SNAKE_CASE : str = new_key.replace(name_pair[0] , name_pair[1] )
SCREAMING_SNAKE_CASE : List[str] = d[key]
if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`; copy it from `cls.predictions.bias`
SCREAMING_SNAKE_CASE : str = new_d["""cls.predictions.bias"""]
return new_d
@torch.no_grad()
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
assert (
checkpoint_path.split("""/""" )[-1] in ACCEPTABLE_CHECKPOINTS
), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
SCREAMING_SNAKE_CASE : Dict = """pretraining"""
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE : Union[str, Any] = {"""visual_embedding_dim""": 5_12}
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE : Dict = {"""visual_embedding_dim""": 20_48}
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE : Tuple = {"""visual_embedding_dim""": 20_48}
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE : str = {"""visual_embedding_dim""": 10_24}
else:
raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE : str = {"""visual_embedding_dim""": 5_12}
SCREAMING_SNAKE_CASE : List[Any] = """multichoice"""
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE : List[str] = {"""visual_embedding_dim""": 20_48}
SCREAMING_SNAKE_CASE : Optional[int] = """vqa_advanced"""
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE : Union[str, Any] = {"""visual_embedding_dim""": 20_48, """num_labels""": 31_29}
SCREAMING_SNAKE_CASE : List[Any] = """vqa"""
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE : List[str] = {
"""visual_embedding_dim""": 10_24,
"""num_labels""": 2,
}
SCREAMING_SNAKE_CASE : Tuple = """nlvr"""
SCREAMING_SNAKE_CASE : int = VisualBertConfig(**lowerCamelCase_ )
# Load State Dict
SCREAMING_SNAKE_CASE : str = load_state_dict(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = get_new_dict(lowerCamelCase_ , lowerCamelCase_ )
if model_type == "pretraining":
SCREAMING_SNAKE_CASE : Any = VisualBertForPreTraining(lowerCamelCase_ )
elif model_type == "vqa":
SCREAMING_SNAKE_CASE : List[str] = VisualBertForQuestionAnswering(lowerCamelCase_ )
elif model_type == "nlvr":
SCREAMING_SNAKE_CASE : Optional[int] = VisualBertForVisualReasoning(lowerCamelCase_ )
elif model_type == "multichoice":
SCREAMING_SNAKE_CASE : List[str] = VisualBertForMultipleChoice(lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
# Save Checkpoints
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
__UpperCAmelCase = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 323 |
'''simple docstring'''
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return number | (1 << position)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return number & ~(1 << position)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return number ^ (1 << position)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return ((number >> position) & 1) == 1
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323 | 1 |
'''simple docstring'''
from manim import *
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
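        # Lays out CPU, GPU, Model and Disk memory blocks, then animates weights shuttling between CPU and GPU as an input flows through an offloaded model.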
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE : List[str] = Rectangle(height=0.25 , width=0.25 )
SCREAMING_SNAKE_CASE : Optional[int] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : List[Any] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : str = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Tuple = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : List[Any] = Text("""CPU""" , font_size=24 )
SCREAMING_SNAKE_CASE : Any = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = Text("""GPU""" , font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : List[Any] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Text("""Model""" , font_size=24 )
SCREAMING_SNAKE_CASE : List[str] = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for i, rect in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = fill.copy().set_fill(lowerCamelCase_ , opacity=0.8 )
target.move_to(lowerCamelCase_ )
model_arr.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(lowerCamelCase_ )
self.add(*lowerCamelCase_ , *lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Tuple = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Tuple = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Optional[int] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Dict = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : List[Any] = Text("""Disk""" , font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
disk.move_to([-4, -1.25, 0] )
self.add(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE : Optional[Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(lowerCamelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = MarkupText(
f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = Square(0.3 )
input.set_fill(lowerCamelCase_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , lowerCamelCase_ , buff=0.5 )
self.play(Write(lowerCamelCase_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=lowerCamelCase_ , buff=0.02 )
self.play(MoveToTarget(lowerCamelCase_ ) )
self.play(FadeOut(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : int = Arrow(start=lowerCamelCase_ , end=lowerCamelCase_ , color=lowerCamelCase_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , lowerCamelCase_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
SCREAMING_SNAKE_CASE : Optional[int] = MarkupText(
f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=3 ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02}
self.play(
Write(lowerCamelCase_ ) , Circumscribe(model_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_cpu_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
SCREAMING_SNAKE_CASE : Optional[int] = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , lowerCamelCase_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
SCREAMING_SNAKE_CASE : Any = AnimationGroup(
FadeOut(lowerCamelCase_ , run_time=0.5 ) , MoveToTarget(lowerCamelCase_ , run_time=0.5 ) , FadeIn(lowerCamelCase_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(lowerCamelCase_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
SCREAMING_SNAKE_CASE : Optional[Any] = 0.7
self.play(
Circumscribe(model_arr[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_arr[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = a_c
SCREAMING_SNAKE_CASE : Optional[Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(lowerCamelCase_ ) , FadeOut(lowerCamelCase_ , run_time=0.5 ) , )
SCREAMING_SNAKE_CASE : int = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=3 ) , MoveToTarget(lowerCamelCase_ ) )
self.wait()
| 323 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=14 , lowerCamelCase_ : Optional[Any]=7 , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=True , lowerCamelCase_ : str=False , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : int=99 , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : int=4 , lowerCamelCase_ : List[Any]=4 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : Union[str, Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : List[str]=5_12 , lowerCamelCase_ : Union[str, Any]=0.02 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : Optional[int] = batch_size
SCREAMING_SNAKE_CASE : Any = seq_length
SCREAMING_SNAKE_CASE : List[str] = is_training
SCREAMING_SNAKE_CASE : Optional[int] = use_input_mask
SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = rotary_dim
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Dict = vocab_size - 1
SCREAMING_SNAKE_CASE : str = vocab_size - 1
SCREAMING_SNAKE_CASE : List[Any] = vocab_size - 1
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : List[str] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCamelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = config_and_inputs
SCREAMING_SNAKE_CASE : Tuple = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = 20
SCREAMING_SNAKE_CASE : Any = model_class_name(lowerCamelCase_ )
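        # Decode all but the last token with a fresh cache, feed the final token from past_key_values, and compare against a single full forward pass.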
SCREAMING_SNAKE_CASE : List[Any] = model.init_cache(input_ids.shape[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
SCREAMING_SNAKE_CASE : Optional[int] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE : Any = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE : str = model(
input_ids[:, -1:] , attention_mask=lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = 20
SCREAMING_SNAKE_CASE : Dict = model_class_name(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
SCREAMING_SNAKE_CASE : str = model.init_cache(input_ids.shape[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE : Any = model(
input_ids[:, :-1] , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE : Dict = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCamelCase_ , position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class UpperCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
SCREAMING_SNAKE_CASE__ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxGPTJModelTester(self )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
@tooslow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : Optional[Any] = model.config.eos_token_id
SCREAMING_SNAKE_CASE : str = jax.jit(model.generate )
SCREAMING_SNAKE_CASE : str = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@is_pt_flax_cross_test
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Optional[int] = 1
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
SCREAMING_SNAKE_CASE : Optional[int] = pt_model_class(lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = fx_state
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = pt_model(**lowerCamelCase_ ).to_tuple()
SCREAMING_SNAKE_CASE : Any = fx_model(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model_class.from_pretrained(lowerCamelCase_ , from_pt=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = fx_model_loaded(**lowerCamelCase_ ).to_tuple()
self.assertEqual(
len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE : int = getattr(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = pt_model_class(lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : List[Any] = load_flax_weights_in_pytorch_model(lowerCamelCase_ , fx_model.params )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = pt_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
SCREAMING_SNAKE_CASE : Dict = 1
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : Tuple = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = pt_model(**lowerCamelCase_ ).to_tuple()
SCREAMING_SNAKE_CASE : Optional[Any] = fx_model(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = pt_model_class.from_pretrained(lowerCamelCase_ , from_flax=lowerCamelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = pt_model_loaded(**lowerCamelCase_ ).to_tuple()
self.assertEqual(
len(lowerCamelCase_ ) , len(lowerCamelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
SCREAMING_SNAKE_CASE : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase_ )
| 323 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__UpperCAmelCase = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
__UpperCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
__UpperCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ = RealmTokenizer
def __init__( self : int , lowerCamelCase_ : Dict=None , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : Tuple="[UNK]" , lowerCamelCase_ : Dict="[SEP]" , lowerCamelCase_ : Dict="[PAD]" , lowerCamelCase_ : Union[str, Any]="[CLS]" , lowerCamelCase_ : Tuple="[MASK]" , lowerCamelCase_ : Dict=True , lowerCamelCase_ : Tuple=None , **lowerCamelCase_ : List[Any] , ):
'''simple docstring'''
super().__init__(
lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , do_lower_case=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , tokenize_chinese_chars=lowerCamelCase_ , strip_accents=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowerCamelCase_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowerCamelCase_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowerCamelCase_ ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE : List[Any] = getattr(lowerCamelCase_ , normalizer_state.pop("""type""" ) )
SCREAMING_SNAKE_CASE : Optional[int] = do_lower_case
SCREAMING_SNAKE_CASE : Optional[int] = strip_accents
SCREAMING_SNAKE_CASE : Dict = tokenize_chinese_chars
SCREAMING_SNAKE_CASE : List[str] = normalizer_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = do_lower_case
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : str , **lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = PaddingStrategy.MAX_LENGTH
SCREAMING_SNAKE_CASE : str = text
SCREAMING_SNAKE_CASE : List[str] = kwargs.pop("""text_pair""" , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = kwargs.pop("""return_tensors""" , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = {
"""input_ids""": [],
"""attention_mask""": [],
"""token_type_ids""": [],
}
for idx, candidate_text in enumerate(lowerCamelCase_ ):
if batch_text_pair is not None:
SCREAMING_SNAKE_CASE : Optional[int] = batch_text_pair[idx]
else:
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : List[Any] = super().__call__(lowerCamelCase_ , lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = encoded_candidates.get("""input_ids""" )
SCREAMING_SNAKE_CASE : Optional[int] = encoded_candidates.get("""attention_mask""" )
SCREAMING_SNAKE_CASE : Any = encoded_candidates.get("""token_type_ids""" )
if encoded_input_ids is not None:
output_data["input_ids"].append(lowerCamelCase_ )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(lowerCamelCase_ )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = {key: item for key, item in output_data.items() if len(lowerCamelCase_ ) != 0}
return BatchEncoding(lowerCamelCase_ , tensor_type=lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : int=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCamelCase_ ( self : int , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self : int , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self._tokenizer.model.save(lowerCamelCase_ , name=lowerCamelCase_ )
return tuple(lowerCamelCase_ )
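# A usage sketch for the candidate batch encoder defined above (exposed as
# `batch_encode_candidates` on `RealmTokenizerFast` in transformers): each candidate is
# padded to `max_length`, so the encodings stack into (num_questions, num_candidates,
# seq_len) tensors suitable for REALM's retriever scoring.
from transformers import RealmTokenizerFast

tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
batch = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt")
print(batch["input_ids"].shape)  # torch.Size([2, 2, 10])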
| 323 |
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__ ( lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [R'''h\.\d+\.attn\.bias''', R'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : int = 5_02_57 , lowerCamelCase_ : int = 10_24 , lowerCamelCase_ : int = 7_68 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : str = "gelu_new" , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 1e-5 , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[int] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'''
f''' `n_embd`: {n_embd} are not equal.''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = prefix_inner_dim
SCREAMING_SNAKE_CASE : List[str] = prefix_hidden_dim
SCREAMING_SNAKE_CASE : Tuple = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
SCREAMING_SNAKE_CASE : str = (
nn.Linear(self.prefix_hidden_dim , lowerCamelCase_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
SCREAMING_SNAKE_CASE : Any = GPTaConfig(
vocab_size=lowerCamelCase_ , n_positions=lowerCamelCase_ , n_embd=lowerCamelCase_ , n_layer=lowerCamelCase_ , n_head=lowerCamelCase_ , n_inner=lowerCamelCase_ , activation_function=lowerCamelCase_ , resid_pdrop=lowerCamelCase_ , embd_pdrop=lowerCamelCase_ , attn_pdrop=lowerCamelCase_ , layer_norm_epsilon=lowerCamelCase_ , initializer_range=lowerCamelCase_ , scale_attn_weights=lowerCamelCase_ , use_cache=lowerCamelCase_ , scale_attn_by_inverse_layer_idx=lowerCamelCase_ , reorder_and_upcast_attn=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = GPTaLMHeadModel(lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : Optional[torch.Tensor] = None , lowerCamelCase_ : Optional[torch.Tensor] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.transformer.transformer.wte(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.encode_prefix(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.decode_prefix(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
SCREAMING_SNAKE_CASE : Dict = torch.cat((dummy_token, input_ids) , dim=1 )
SCREAMING_SNAKE_CASE : str = self.transformer(inputs_embeds=lowerCamelCase_ , labels=lowerCamelCase_ , attention_mask=lowerCamelCase_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : torch.device ):
'''simple docstring'''
return torch.zeros(lowerCamelCase_ , self.prefix_length , dtype=torch.intaa , device=lowerCamelCase_ )
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
return self.encode_prefix(lowerCamelCase_ )
@torch.no_grad()
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = torch.split(lowerCamelCase_ , 1 , dim=0 )
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : Tuple = []
for feature in features:
SCREAMING_SNAKE_CASE : Optional[int] = self.decode_prefix(feature.to(lowerCamelCase_ ) ) # back to the clip feature
# Only support beam search for now
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.generate_beam(
input_embeds=lowerCamelCase_ , device=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = torch.stack(lowerCamelCase_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : int=None , lowerCamelCase_ : int = 5 , lowerCamelCase_ : int = 67 , lowerCamelCase_ : float = 1.0 , lowerCamelCase_ : Optional[int] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = eos_token_id
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.int )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.bool )
if input_embeds is not None:
SCREAMING_SNAKE_CASE : Dict = input_embeds
else:
SCREAMING_SNAKE_CASE : Dict = self.transformer.transformer.wte(lowerCamelCase_ )
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[int] = self.transformer(inputs_embeds=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = outputs.logits
SCREAMING_SNAKE_CASE : Optional[int] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
SCREAMING_SNAKE_CASE : Any = logits.softmax(-1 ).log()
if scores is None:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = logits.topk(lowerCamelCase_ , -1 )
SCREAMING_SNAKE_CASE : Optional[Any] = generated.expand(lowerCamelCase_ , *generated.shape[1:] )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
SCREAMING_SNAKE_CASE : List[Any] = next_tokens
else:
SCREAMING_SNAKE_CASE : Dict = tokens.expand(lowerCamelCase_ , *tokens.shape[1:] )
SCREAMING_SNAKE_CASE : str = torch.cat((tokens, next_tokens) , dim=1 )
else:
SCREAMING_SNAKE_CASE : Tuple = -float(np.inf )
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Dict = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
SCREAMING_SNAKE_CASE : List[str] = scores_sum / seq_lengths[:, None]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = scores_sum_average.view(-1 ).topk(lowerCamelCase_ , -1 )
SCREAMING_SNAKE_CASE : str = next_tokens // scores_sum.shape[1]
SCREAMING_SNAKE_CASE : Tuple = seq_lengths[next_tokens_source]
SCREAMING_SNAKE_CASE : int = next_tokens % scores_sum.shape[1]
SCREAMING_SNAKE_CASE : Dict = next_tokens.unsqueeze(1 )
SCREAMING_SNAKE_CASE : Dict = tokens[next_tokens_source]
SCREAMING_SNAKE_CASE : Any = torch.cat((tokens, next_tokens) , dim=1 )
SCREAMING_SNAKE_CASE : List[str] = generated[next_tokens_source]
SCREAMING_SNAKE_CASE : Optional[Any] = scores_sum_average * seq_lengths
SCREAMING_SNAKE_CASE : Any = is_stopped[next_tokens_source]
SCREAMING_SNAKE_CASE : Dict = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
SCREAMING_SNAKE_CASE : str = torch.cat((generated, next_token_embed) , dim=1 )
SCREAMING_SNAKE_CASE : Dict = is_stopped + next_tokens.eq(lowerCamelCase_ ).squeeze()
if is_stopped.all():
break
SCREAMING_SNAKE_CASE : int = scores / seq_lengths
SCREAMING_SNAKE_CASE : Dict = scores.argsort(descending=lowerCamelCase_ )
# tokens tensors are already padded to max_seq_length
SCREAMING_SNAKE_CASE : Union[str, Any] = [tokens[i] for i in order]
SCREAMING_SNAKE_CASE : Dict = torch.stack(lowerCamelCase_ , dim=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
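# The beam loop above accumulates per-token log-probabilities and ranks candidates by
# the length-normalized average (`scores_sum / seq_lengths`), so beams are not penalized
# merely for emitting more tokens. A toy numeric illustration of that ranking:
import numpy as np

log_probs_a = np.array([-0.1, -0.2, -0.3])  # 3-token beam, sum = -0.6
log_probs_b = np.array([-0.1, -0.15])       # 2-token beam, sum = -0.25
avg_a = log_probs_a.sum() / log_probs_a.size  # -0.2
avg_b = log_probs_b.sum() / log_probs_b.size  # -0.125
assert avg_b > avg_a  # beam b ranks higher on the normalized score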
| 323 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__UpperCAmelCase = False
@skip_mps
class UpperCamelCase__ ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = StableDiffusionAttendAndExcitePipeline
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def lowerCamelCase_ ( cls : List[str] ):
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : Any ):
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : str = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
SCREAMING_SNAKE_CASE : Optional[Any] = CLIPTextModel(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
SCREAMING_SNAKE_CASE : Optional[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=0 ):
'''simple docstring'''
if str(lowerCamelCase_ ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE : str = torch.manual_seed(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = """cpu"""
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.get_dummy_inputs(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = pipe(**lowerCamelCase_ ).images
SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
SCREAMING_SNAKE_CASE : Optional[Any] = np.array(
[0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] )
SCREAMING_SNAKE_CASE : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase_ , 1e-3 )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=5e-4 )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def lowerCamelCase_ ( cls : List[Any] ):
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : List[str] ):
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(lowerCamelCase_ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = torch.manual_seed(51 )
SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , safety_checker=lowerCamelCase_ , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
SCREAMING_SNAKE_CASE : Optional[Any] = """a painting of an elephant with glasses"""
SCREAMING_SNAKE_CASE : Tuple = [5, 7]
SCREAMING_SNAKE_CASE : int = pipe(
prompt=lowerCamelCase_ , token_indices=lowerCamelCase_ , guidance_scale=7.5 , generator=lowerCamelCase_ , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
SCREAMING_SNAKE_CASE : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
assert np.abs((expected_image - image).max() ) < 5e-1
| 323 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''git_vision_model'''
def __init__( self : int , lowerCamelCase_ : Dict=7_68 , lowerCamelCase_ : Tuple=30_72 , lowerCamelCase_ : List[Any]=12 , lowerCamelCase_ : Tuple=12 , lowerCamelCase_ : int=3 , lowerCamelCase_ : List[str]=2_24 , lowerCamelCase_ : Optional[Any]=16 , lowerCamelCase_ : Optional[Any]="quick_gelu" , lowerCamelCase_ : List[Any]=1e-5 , lowerCamelCase_ : List[Any]=0.0 , lowerCamelCase_ : Optional[Any]=0.02 , **lowerCamelCase_ : str , ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : int = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE : Optional[int] = image_size
SCREAMING_SNAKE_CASE : List[str] = initializer_range
SCREAMING_SNAKE_CASE : str = attention_dropout
SCREAMING_SNAKE_CASE : Any = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = hidden_act
@classmethod
def lowerCamelCase_ ( cls : List[str] , lowerCamelCase_ : Union[str, os.PathLike] , **lowerCamelCase_ : int ):
'''simple docstring'''
cls._set_token_in_kwargs(lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = cls.get_config_dict(lowerCamelCase_ , **lowerCamelCase_ )
# get the vision config dict if we are loading from GitConfig
if config_dict.get("""model_type""" ) == "git":
SCREAMING_SNAKE_CASE : Optional[Any] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowerCamelCase_ , **lowerCamelCase_ )
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''git'''
def __init__( self : List[str] , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Tuple=3_05_22 , lowerCamelCase_ : Optional[Any]=7_68 , lowerCamelCase_ : Any=6 , lowerCamelCase_ : List[str]=12 , lowerCamelCase_ : List[str]=30_72 , lowerCamelCase_ : Union[str, Any]="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Dict=0.1 , lowerCamelCase_ : Optional[int]=10_24 , lowerCamelCase_ : int=0.02 , lowerCamelCase_ : Optional[int]=1e-12 , lowerCamelCase_ : Union[str, Any]=0 , lowerCamelCase_ : Optional[Any]="absolute" , lowerCamelCase_ : Dict=True , lowerCamelCase_ : Optional[Any]=False , lowerCamelCase_ : Optional[int]=1_01 , lowerCamelCase_ : Optional[Any]=1_02 , lowerCamelCase_ : List[str]=None , **lowerCamelCase_ : str , ):
'''simple docstring'''
super().__init__(bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , pad_token_id=lowerCamelCase_ , **lowerCamelCase_ )
if vision_config is None:
SCREAMING_SNAKE_CASE : Any = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = GitVisionConfig(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Tuple = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : Union[str, Any] = position_embedding_type
SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache
SCREAMING_SNAKE_CASE : List[str] = tie_word_embeddings
SCREAMING_SNAKE_CASE : int = num_image_with_embedding
SCREAMING_SNAKE_CASE : Optional[Any] = bos_token_id
SCREAMING_SNAKE_CASE : str = eos_token_id
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : Tuple = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE : Any = self.__class__.model_type
return output
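# A short sketch of the nested-config round trip implemented above, assuming the two
# classes correspond to `GitVisionConfig` and `GitConfig` in transformers; the keyword
# values are illustrative defaults.
from transformers import GitConfig

config = GitConfig(vision_config={"image_size": 224, "patch_size": 16})
serialized = config.to_dict()
assert serialized["model_type"] == "git"
assert serialized["vision_config"]["image_size"] == 224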
| 323 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class UpperCamelCase__ ( lowercase_ , lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''nat'''
SCREAMING_SNAKE_CASE__ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : Dict , lowerCamelCase_ : Optional[Any]=4 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Union[str, Any]=64 , lowerCamelCase_ : List[Any]=[3, 4, 6, 5] , lowerCamelCase_ : Union[str, Any]=[2, 4, 8, 16] , lowerCamelCase_ : Union[str, Any]=7 , lowerCamelCase_ : Union[str, Any]=3.0 , lowerCamelCase_ : int=True , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Dict=0.0 , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : Tuple="gelu" , lowerCamelCase_ : Optional[int]=0.02 , lowerCamelCase_ : str=1e-5 , lowerCamelCase_ : Union[str, Any]=0.0 , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Dict=None , **lowerCamelCase_ : str , ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = patch_size
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : int = embed_dim
SCREAMING_SNAKE_CASE : int = depths
SCREAMING_SNAKE_CASE : Optional[int] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = num_heads
SCREAMING_SNAKE_CASE : Tuple = kernel_size
SCREAMING_SNAKE_CASE : Union[str, Any] = mlp_ratio
SCREAMING_SNAKE_CASE : Dict = qkv_bias
SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = drop_path_rate
SCREAMING_SNAKE_CASE : Dict = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : Dict = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE : List[Any] = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
SCREAMING_SNAKE_CASE : str = layer_scale_init_value
SCREAMING_SNAKE_CASE : List[Any] = ["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(lowerCamelCase_ ) + 1 )]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = get_aligned_output_features_output_indices(
out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )
| 323 |
'''simple docstring'''
from manim import *
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE : List[str] = Rectangle(height=0.25 , width=0.25 )
SCREAMING_SNAKE_CASE : Optional[int] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : List[Any] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : str = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Tuple = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : List[Any] = Text("""CPU""" , font_size=24 )
SCREAMING_SNAKE_CASE : Any = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = Text("""GPU""" , font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : List[Any] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Text("""Model""" , font_size=24 )
SCREAMING_SNAKE_CASE : List[str] = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for i, rect in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = fill.copy().set_fill(lowerCamelCase_ , opacity=0.8 )
target.move_to(lowerCamelCase_ )
model_arr.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(lowerCamelCase_ )
self.add(*lowerCamelCase_ , *lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Tuple = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Tuple = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Optional[int] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : Dict = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
SCREAMING_SNAKE_CASE : List[Any] = Text("""Disk""" , font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
disk.move_to([-4, -1.25, 0] )
self.add(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE : Optional[Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(lowerCamelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = MarkupText(
f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = Square(0.3 )
input.set_fill(lowerCamelCase_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , lowerCamelCase_ , buff=0.5 )
self.play(Write(lowerCamelCase_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=lowerCamelCase_ , buff=0.02 )
self.play(MoveToTarget(lowerCamelCase_ ) )
self.play(FadeOut(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : int = Arrow(start=lowerCamelCase_ , end=lowerCamelCase_ , color=lowerCamelCase_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , lowerCamelCase_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
SCREAMING_SNAKE_CASE : Optional[int] = MarkupText(
f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=3 ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02}
self.play(
Write(lowerCamelCase_ ) , Circumscribe(model_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_cpu_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
SCREAMING_SNAKE_CASE : Optional[int] = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , lowerCamelCase_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
SCREAMING_SNAKE_CASE : Any = AnimationGroup(
FadeOut(lowerCamelCase_ , run_time=0.5 ) , MoveToTarget(lowerCamelCase_ , run_time=0.5 ) , FadeIn(lowerCamelCase_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(lowerCamelCase_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
SCREAMING_SNAKE_CASE : Optional[Any] = 0.7
self.play(
Circumscribe(model_arr[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_arr[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = a_c
SCREAMING_SNAKE_CASE : Optional[Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(lowerCamelCase_ ) , FadeOut(lowerCamelCase_ , run_time=0.5 ) , )
SCREAMING_SNAKE_CASE : int = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=3 ) , MoveToTarget(lowerCamelCase_ ) )
self.wait()
| 323 | 1 |
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = None
@staticmethod
def lowerCamelCase_ ( ):
'''simple docstring'''
raise NotImplementedError
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : Any ):
'''simple docstring'''
raise NotImplementedError
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
raise NotImplementedError
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
if not self.is_available():
raise RuntimeError(
f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
@classmethod
def lowerCamelCase_ ( cls : Optional[Any] ):
'''simple docstring'''
return f'''`pip install {cls.pip_package or cls.name}`'''
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''optuna'''
@staticmethod
def lowerCamelCase_ ( ):
'''simple docstring'''
return is_optuna_available()
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
return run_hp_search_optuna(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
def lowerCamelCase_ ( self : int , lowerCamelCase_ : str ):
'''simple docstring'''
return default_hp_space_optuna(lowerCamelCase_ )
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''ray'''
SCREAMING_SNAKE_CASE__ = '''\'ray[tune]\''''
@staticmethod
def lowerCamelCase_ ( ):
'''simple docstring'''
return is_ray_available()
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : Any ):
'''simple docstring'''
return run_hp_search_ray(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
return default_hp_space_ray(lowerCamelCase_ )
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''sigopt'''
@staticmethod
def lowerCamelCase_ ( ):
'''simple docstring'''
return is_sigopt_available()
def lowerCamelCase_ ( self : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : str ):
'''simple docstring'''
return run_hp_search_sigopt(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Dict ):
'''simple docstring'''
return default_hp_space_sigopt(lowerCamelCase_ )
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''wandb'''
@staticmethod
def lowerCamelCase_ ( ):
'''simple docstring'''
return is_wandb_available()
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : Tuple ):
'''simple docstring'''
return run_hp_search_wandb(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : int ):
'''simple docstring'''
return default_hp_space_wandb(lowerCamelCase_ )
__UpperCAmelCase = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def __A ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(lowerCamelCase_ ) > 0:
SCREAMING_SNAKE_CASE : List[Any] = available_backends[0].name
if len(lowerCamelCase_ ) > 1:
logger.info(
f'''{len(lowerCamelCase_ )} hyperparameter search backends available. Using {name} as the default.''' )
return name
raise RuntimeError(
"""No hyperparameter search backend available.\n"""
+ """\n""".join(
f''' - To install {backend.name} run {backend.pip_install()}'''
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
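# A minimal backend-resolution sketch against the classes above. Note the assumptions:
# `__A` is this file's mangled name for what transformers calls
# `default_hp_search_backend`, and `ensure_available` is the transformers name for the
# availability guard defined above.
def resolve_backend(requested=None):
    if requested is None:
        requested = __A()  # first available backend name, as implemented above
    backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(requested)]()
    backend.ensure_available()  # raises with a pip-install hint when the package is missing
    return backend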
| 323 |
'''simple docstring'''
from __future__ import annotations
__UpperCAmelCase = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase_ : dict[str, list[str]] , lowerCamelCase_ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = graph
# mapping node to its parent in resulting breadth first tree
SCREAMING_SNAKE_CASE : dict[str, str | None] = {}
SCREAMING_SNAKE_CASE : List[str] = source_vertex
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = {self.source_vertex}
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Optional[Any] = [self.source_vertex] # first in first out queue
while queue:
SCREAMING_SNAKE_CASE : str = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = vertex
queue.append(lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str ):
'''simple docstring'''
if target_vertex == self.source_vertex:
return self.source_vertex
SCREAMING_SNAKE_CASE : Optional[Any] = self.parent.get(lowerCamelCase_ )
if target_vertex_parent is None:
SCREAMING_SNAKE_CASE : Tuple = (
f'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
)
raise ValueError(lowerCamelCase_ )
return self.shortest_path(lowerCamelCase_ ) + f'''->{target_vertex}'''
if __name__ == "__main__":
__UpperCAmelCase = Graph(graph, """G""")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 323 | 1 |
'''simple docstring'''
__UpperCAmelCase = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
__UpperCAmelCase = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
__UpperCAmelCase = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
__UpperCAmelCase = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
__UpperCAmelCase = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
__UpperCAmelCase = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
__UpperCAmelCase = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
__UpperCAmelCase = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
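# A quick structural check for the hand-picked schedules above: every list must be
# strictly decreasing and end at timestep 0, which is what downstream samplers expect.
# A sketch; run it against any of the module-level lists.
def validate_schedule(timesteps):
    assert all(a > b for a, b in zip(timesteps, timesteps[1:])), "timesteps must strictly decrease"
    assert timesteps[-1] == 0, "a schedule must end at timestep 0"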
| 323 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__UpperCAmelCase = 0
__UpperCAmelCase = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0's are free paths whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__UpperCAmelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__UpperCAmelCase = tuple[int, int]
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Node | None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = pos_x
SCREAMING_SNAKE_CASE : Any = pos_y
SCREAMING_SNAKE_CASE : Optional[int] = (pos_y, pos_x)
SCREAMING_SNAKE_CASE : Tuple = goal_x
SCREAMING_SNAKE_CASE : List[str] = goal_y
SCREAMING_SNAKE_CASE : Optional[Any] = g_cost
SCREAMING_SNAKE_CASE : Tuple = parent
SCREAMING_SNAKE_CASE : int = self.calculate_heuristic()
SCREAMING_SNAKE_CASE : Tuple = self.g_cost + self.h_cost
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.pos_x - self.goal_x
SCREAMING_SNAKE_CASE : List[str] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowerCamelCase_ ) + abs(lowerCamelCase_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : Optional[Any] , lowerCamelCase_ : Node ):
'''simple docstring'''
return self.f_cost < other.f_cost
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : int , lowerCamelCase_ : TPosition , lowerCamelCase_ : TPosition ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = [self.start]
SCREAMING_SNAKE_CASE : list[Node] = []
SCREAMING_SNAKE_CASE : str = False
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
SCREAMING_SNAKE_CASE : Optional[Any] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowerCamelCase_ )
self.closed_nodes.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = self.get_successors(lowerCamelCase_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowerCamelCase_ )
else:
# retrieve the best current path
SCREAMING_SNAKE_CASE : int = self.open_nodes.pop(self.open_nodes.index(lowerCamelCase_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowerCamelCase_ )
else:
self.open_nodes.append(lowerCamelCase_ )
return [self.start.pos]
def lowerCamelCase_ ( self : int , lowerCamelCase_ : Node ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = []
for action in delta:
SCREAMING_SNAKE_CASE : Dict = parent.pos_x + action[1]
SCREAMING_SNAKE_CASE : List[str] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCamelCase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowerCamelCase_ , lowerCamelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCamelCase_ , ) )
return successors
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Node | None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = node
SCREAMING_SNAKE_CASE : List[str] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
SCREAMING_SNAKE_CASE : Optional[Any] = current_node.parent
path.reverse()
return path
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : int , lowerCamelCase_ : TPosition , lowerCamelCase_ : TPosition ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = AStar(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = AStar(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = False
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
SCREAMING_SNAKE_CASE : List[str] = self.fwd_astar.open_nodes.pop(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowerCamelCase_ , lowerCamelCase_ )
self.fwd_astar.closed_nodes.append(lowerCamelCase_ )
self.bwd_astar.closed_nodes.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = current_bwd_node
SCREAMING_SNAKE_CASE : Any = current_fwd_node
SCREAMING_SNAKE_CASE : Dict = {
self.fwd_astar: self.fwd_astar.get_successors(lowerCamelCase_ ),
self.bwd_astar: self.bwd_astar.get_successors(lowerCamelCase_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowerCamelCase_ )
else:
# retrieve the best current path
SCREAMING_SNAKE_CASE : int = astar.open_nodes.pop(
astar.open_nodes.index(lowerCamelCase_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowerCamelCase_ )
else:
astar.open_nodes.append(lowerCamelCase_ )
return [self.fwd_astar.start.pos]
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Node , lowerCamelCase_ : Node ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.fwd_astar.retrace_path(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.bwd_astar.retrace_path(lowerCamelCase_ )
bwd_path.pop()
bwd_path.reverse()
SCREAMING_SNAKE_CASE : str = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__UpperCAmelCase = (0, 0)
__UpperCAmelCase = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__UpperCAmelCase = time.time()
__UpperCAmelCase = AStar(init, goal)
__UpperCAmelCase = a_star.search()
__UpperCAmelCase = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
__UpperCAmelCase = time.time()
__UpperCAmelCase = BidirectionalAStar(init, goal)
__UpperCAmelCase = bd_astar.search()  # actually run the search so the timing below measures real work
__UpperCAmelCase = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
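# A worked check of the two heuristics selectable via HEURISTIC above, for the demo's
# start (0, 0) and goal (6, 6): Manhattan gives |6| + |6| = 12 while Euclidean gives
# sqrt(6**2 + 6**2) ~= 8.49. Manhattan remains admissible for the 4-connected moves in
# `delta`, so it is the safer choice on this grid.
def manhattan(dx: int, dy: int) -> int:
    return abs(dx) + abs(dy)

def euclidean(dx: int, dy: int) -> float:
    return sqrt(dx**2 + dy**2)

assert manhattan(6, 6) == 12
assert round(euclidean(6, 6), 2) == 8.49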
| 323 | 1 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Any , lowerCamelCase_ : WhisperForConditionalGeneration , lowerCamelCase_ : WhisperProcessor , lowerCamelCase_ : AutoencoderKL , lowerCamelCase_ : CLIPTextModel , lowerCamelCase_ : CLIPTokenizer , lowerCamelCase_ : UNetaDConditionModel , lowerCamelCase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCamelCase_ : StableDiffusionSafetyChecker , lowerCamelCase_ : CLIPImageProcessor , ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=lowerCamelCase_ , speech_processor=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
SCREAMING_SNAKE_CASE : Dict = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
self.enable_attention_slicing(lowerCamelCase_ )
@torch.no_grad()
def __call__( self : Dict , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any]=1_60_00 , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 50 , lowerCamelCase_ : float = 7.5 , lowerCamelCase_ : Optional[Union[str, List[str]]] = None , lowerCamelCase_ : Optional[int] = 1 , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : Optional[torch.Generator] = None , lowerCamelCase_ : Optional[torch.FloatTensor] = None , lowerCamelCase_ : Optional[str] = "pil" , lowerCamelCase_ : bool = True , lowerCamelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ : int = 1 , **lowerCamelCase_ : int , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.speech_processor.feature_extractor(
lowerCamelCase_ , return_tensors="""pt""" , sampling_rate=lowerCamelCase_ ).input_features.to(self.device )
SCREAMING_SNAKE_CASE : List[str] = self.speech_model.generate(lowerCamelCase_ , max_length=48_00_00 )
SCREAMING_SNAKE_CASE : int = self.speech_processor.tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , normalize=lowerCamelCase_ )[
0
]
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = 1
elif isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = len(lowerCamelCase_ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase_ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase_ , lowerCamelCase_ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(lowerCamelCase_ )}.''' )
# get prompt text embeddings
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(
lowerCamelCase_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
SCREAMING_SNAKE_CASE : List[str] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE : Dict = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
SCREAMING_SNAKE_CASE : Tuple = text_input_ids[:, : self.tokenizer.model_max_length]
SCREAMING_SNAKE_CASE : Optional[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = text_embeddings.shape
SCREAMING_SNAKE_CASE : Tuple = text_embeddings.repeat(1 , lowerCamelCase_ , 1 )
SCREAMING_SNAKE_CASE : int = text_embeddings.view(bs_embed * num_images_per_prompt , lowerCamelCase_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
SCREAMING_SNAKE_CASE : Any = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE : List[str]
if negative_prompt is None:
SCREAMING_SNAKE_CASE : List[Any] = [""""""] * batch_size
elif type(lowerCamelCase_ ) is not type(lowerCamelCase_ ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase_ )} !='''
f''' {type(lowerCamelCase_ )}.''' )
elif isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : List[Any] = [negative_prompt]
elif batch_size != len(lowerCamelCase_ ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase_ )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
SCREAMING_SNAKE_CASE : List[str] = negative_prompt
SCREAMING_SNAKE_CASE : str = text_input_ids.shape[-1]
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer(
lowerCamelCase_ , padding="""max_length""" , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ , return_tensors="""pt""" , )
SCREAMING_SNAKE_CASE : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE : List[Any] = uncond_embeddings.shape[1]
SCREAMING_SNAKE_CASE : Optional[Any] = uncond_embeddings.repeat(1 , lowerCamelCase_ , 1 )
SCREAMING_SNAKE_CASE : int = uncond_embeddings.view(batch_size * num_images_per_prompt , lowerCamelCase_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE : Tuple = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
SCREAMING_SNAKE_CASE : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
SCREAMING_SNAKE_CASE : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
SCREAMING_SNAKE_CASE : Tuple = torch.randn(lowerCamelCase_ , generator=lowerCamelCase_ , device="""cpu""" , dtype=lowerCamelCase_ ).to(
self.device )
else:
SCREAMING_SNAKE_CASE : Any = torch.randn(lowerCamelCase_ , generator=lowerCamelCase_ , device=self.device , dtype=lowerCamelCase_ )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
SCREAMING_SNAKE_CASE : Any = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowerCamelCase_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
SCREAMING_SNAKE_CASE : Any = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
SCREAMING_SNAKE_CASE : Tuple = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
SCREAMING_SNAKE_CASE : Any = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
SCREAMING_SNAKE_CASE : List[str] = {}
if accepts_eta:
SCREAMING_SNAKE_CASE : Optional[int] = eta
for i, t in enumerate(self.progress_bar(lowerCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
# predict the noise residual
SCREAMING_SNAKE_CASE : Tuple = self.unet(lowerCamelCase_ , lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ ).sample
# perform guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE : str = self.scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = 1 / 0.18_215 * latents
SCREAMING_SNAKE_CASE : Dict = self.vae.decode(lowerCamelCase_ ).sample
SCREAMING_SNAKE_CASE : Tuple = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
SCREAMING_SNAKE_CASE : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE : Optional[int] = self.numpy_to_pil(lowerCamelCase_ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowerCamelCase_ , nsfw_content_detected=lowerCamelCase_ )
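The guidance arithmetic above is the standard classifier-free guidance update. A minimal sketch on dummy tensors, assuming only that the batch stacks the unconditional prediction ahead of the text-conditioned one (the shapes and the 7.5 scale are illustrative, not the pipeline's required values):
import torch
# Classifier-free guidance on dummy data; not the pipeline's real shapes.
noise_pred = torch.randn(2, 4, 64, 64)  # [uncond, text] stacked on dim 0
guidance_scale = 7.5
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert guided.shape == (1, 4, 64, 64)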
| 323 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''efficientnet'''
def __init__( self : Tuple , lowerCamelCase_ : int = 3 , lowerCamelCase_ : int = 6_00 , lowerCamelCase_ : float = 2.0 , lowerCamelCase_ : float = 3.1 , lowerCamelCase_ : int = 8 , lowerCamelCase_ : List[int] = [3, 3, 5, 3, 5, 5, 3] , lowerCamelCase_ : List[int] = [32, 16, 24, 40, 80, 1_12, 1_92] , lowerCamelCase_ : List[int] = [16, 24, 40, 80, 1_12, 1_92, 3_20] , lowerCamelCase_ : List[int] = [] , lowerCamelCase_ : List[int] = [1, 2, 2, 2, 1, 2, 1] , lowerCamelCase_ : List[int] = [1, 2, 2, 3, 3, 4, 1] , lowerCamelCase_ : List[int] = [1, 6, 6, 6, 6, 6, 6] , lowerCamelCase_ : float = 0.25 , lowerCamelCase_ : str = "swish" , lowerCamelCase_ : int = 25_60 , lowerCamelCase_ : str = "mean" , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : float = 0.001 , lowerCamelCase_ : float = 0.99 , lowerCamelCase_ : float = 0.5 , lowerCamelCase_ : float = 0.2 , **lowerCamelCase_ : int , ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : int = width_coefficient
SCREAMING_SNAKE_CASE : List[str] = depth_coefficient
SCREAMING_SNAKE_CASE : Optional[Any] = depth_divisor
SCREAMING_SNAKE_CASE : List[str] = kernel_sizes
SCREAMING_SNAKE_CASE : Dict = in_channels
SCREAMING_SNAKE_CASE : List[str] = out_channels
SCREAMING_SNAKE_CASE : Any = depthwise_padding
SCREAMING_SNAKE_CASE : Dict = strides
SCREAMING_SNAKE_CASE : Optional[Any] = num_block_repeats
SCREAMING_SNAKE_CASE : Any = expand_ratios
SCREAMING_SNAKE_CASE : Union[str, Any] = squeeze_expansion_ratio
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dim
SCREAMING_SNAKE_CASE : List[str] = pooling_type
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Any = batch_norm_eps
SCREAMING_SNAKE_CASE : Union[str, Any] = batch_norm_momentum
SCREAMING_SNAKE_CASE : Dict = dropout_rate
SCREAMING_SNAKE_CASE : int = drop_connect_rate
SCREAMING_SNAKE_CASE : Optional[Any] = sum(lowerCamelCase_ ) * 4
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = version.parse('''1.11''' )
@property
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return 1e-5
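A quick instantiation sketch for the config above. `EfficientNetConfig` is the public transformers name this class corresponds to, and the final assignment in `__init__` maps to `num_hidden_layers = sum(num_block_repeats) * 4` upstream; both names are assumptions given the obfuscated identifiers.
from transformers import EfficientNetConfig
# Illustrative B7-style overrides; every other field keeps the defaults above.
config = EfficientNetConfig(width_coefficient=2.0, depth_coefficient=3.1, image_size=600)
print(config.num_hidden_layers)  # sum([1, 2, 2, 3, 3, 4, 1]) * 4 == 64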
| 323 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(lowerCamelCase_ , lowerCamelCase_ ) ) )
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if dataset.ndim != value_array.ndim:
SCREAMING_SNAKE_CASE : Union[str, Any] = (
"""Wrong input data's dimensions... """
f'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
)
raise ValueError(lowerCamelCase_ )
try:
if dataset.shape[1] != value_array.shape[1]:
SCREAMING_SNAKE_CASE : Dict = (
"""Wrong input data's shape... """
f'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
)
raise ValueError(lowerCamelCase_ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("""Wrong shape""" )
if dataset.dtype != value_array.dtype:
SCREAMING_SNAKE_CASE : List[str] = (
"""Input data have different datatype... """
f'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
)
raise TypeError(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = []
for value in value_array:
SCREAMING_SNAKE_CASE : Union[str, Any] = euclidean(lowerCamelCase_ , dataset[0] )
SCREAMING_SNAKE_CASE : Dict = dataset[0].tolist()
for dataset_value in dataset[1:]:
SCREAMING_SNAKE_CASE : Optional[int] = euclidean(lowerCamelCase_ , lowerCamelCase_ )
if dist > temp_dist:
SCREAMING_SNAKE_CASE : str = temp_dist
SCREAMING_SNAKE_CASE : Optional[Any] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return np.dot(lowerCamelCase_ , lowerCamelCase_ ) / (norm(lowerCamelCase_ ) * norm(lowerCamelCase_ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
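A self-contained usage sketch for the nearest-neighbour search above. The obfuscated `__A` definitions shadow one another, so the upstream names `euclidean`, `similarity_search`, and `cosine_similarity` are assumed; the inline version below reproduces the same lookup.
import math
import numpy as np
# Find the dataset row closest to the query (values are illustrative).
dataset = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 2.0, 2.0]])
query = np.array([0.0, 0.0, 0.1])
dist, nearest = min(
    (math.sqrt(sum((a - b) ** 2 for a, b in zip(query, row))), row.tolist())
    for row in dataset
)
print(nearest, dist)  # [0.0, 0.0, 0.0] 0.1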
| 323 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(lowercase_ )
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Tuple , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : Tuple ):
'''simple docstring'''
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Optional[int]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = {}
SCREAMING_SNAKE_CASE : List[Any] = {}
if prompt is not None:
SCREAMING_SNAKE_CASE : List[Any] = prompt
if generate_kwargs is not None:
SCREAMING_SNAKE_CASE : Optional[int] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
SCREAMING_SNAKE_CASE : Optional[Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Optional[Any] , lowerCamelCase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCamelCase_ : Any ):
'''simple docstring'''
return super().__call__(lowerCamelCase_ , **lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = load_image(lowerCamelCase_ )
if prompt is not None:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError(
f'''Received an invalid text input, got - {type(lowerCamelCase_ )} - but expected a single string. '''
"""Note also that one single text can be provided for conditional image to text generation.""" )
SCREAMING_SNAKE_CASE : Optional[int] = self.model.config.model_type
if model_type == "git":
SCREAMING_SNAKE_CASE : Dict = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE : str = self.tokenizer(text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ).input_ids
SCREAMING_SNAKE_CASE : Optional[int] = [self.tokenizer.cls_token_id] + input_ids
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
SCREAMING_SNAKE_CASE : int = self.image_processor(images=lowerCamelCase_ , header_text=lowerCamelCase_ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework )
model_inputs.update(lowerCamelCase_ )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
SCREAMING_SNAKE_CASE : Any = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
SCREAMING_SNAKE_CASE : Optional[Any] = None
return model_inputs
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any]=None ):
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , lowerCamelCase_ )
and all(x is None for x in model_inputs["""input_ids"""] )
):
SCREAMING_SNAKE_CASE : List[str] = None
if generate_kwargs is None:
SCREAMING_SNAKE_CASE : int = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
SCREAMING_SNAKE_CASE : Tuple = model_inputs.pop(self.model.main_input_name )
SCREAMING_SNAKE_CASE : Any = self.model.generate(lowerCamelCase_ , **lowerCamelCase_ , **lowerCamelCase_ )
return model_outputs
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for output_ids in model_outputs:
SCREAMING_SNAKE_CASE : List[Any] = {
"""generated_text""": self.tokenizer.decode(
lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , )
}
records.append(lowerCamelCase_ )
return records
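A hedged usage sketch for this pipeline through the standard `pipeline` factory; the checkpoint and image URL are illustrative picks, not requirements of the class.
from transformers import pipeline
# "nlpconnect/vit-gpt2-image-captioning" is one public image-to-text
# checkpoint, chosen purely as an example.
captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
outputs = captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
print(outputs)  # e.g. [{'generated_text': 'two birds standing next to each other'}]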
| 323 | 1 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase__ ( lowercase_ , lowercase_ ):
"""simple docstring"""
@register_to_config
def __init__( self : List[str] , lowerCamelCase_ : bool , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[int] = None ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : str = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
SCREAMING_SNAKE_CASE : Optional[Any] = torch.zeros(lowerCamelCase_ , lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : Optional[Any] = torch.nn.Parameter(lowerCamelCase_ )
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 42
def __init__( self : Optional[Any] , lowerCamelCase_ : VQModel , lowerCamelCase_ : CLIPTextModel , lowerCamelCase_ : CLIPTokenizer , lowerCamelCase_ : TransformeraDModel , lowerCamelCase_ : VQDiffusionScheduler , lowerCamelCase_ : LearnedClassifierFreeSamplingEmbeddings , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vqvae=lowerCamelCase_ , transformer=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , scheduler=lowerCamelCase_ , learned_classifier_free_sampling_embeddings=lowerCamelCase_ , )
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = len(lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else 1
# get prompt text embeddings
SCREAMING_SNAKE_CASE : Dict = self.tokenizer(
lowerCamelCase_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
SCREAMING_SNAKE_CASE : List[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
SCREAMING_SNAKE_CASE : Optional[Any] = text_input_ids[:, : self.tokenizer.model_max_length]
SCREAMING_SNAKE_CASE : Optional[int] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
SCREAMING_SNAKE_CASE : Optional[Any] = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=lowerCamelCase_ )
# duplicate text embeddings for each generation per prompt
SCREAMING_SNAKE_CASE : List[Any] = prompt_embeds.repeat_interleave(lowerCamelCase_ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.learned_classifier_free_sampling_embeddings.embeddings
SCREAMING_SNAKE_CASE : Tuple = negative_prompt_embeds.unsqueeze(0 ).repeat(lowerCamelCase_ , 1 , 1 )
else:
SCREAMING_SNAKE_CASE : Tuple = [""""""] * batch_size
SCREAMING_SNAKE_CASE : Optional[Any] = text_input_ids.shape[-1]
SCREAMING_SNAKE_CASE : Dict = self.tokenizer(
lowerCamelCase_ , padding="""max_length""" , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ , return_tensors="""pt""" , )
SCREAMING_SNAKE_CASE : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
SCREAMING_SNAKE_CASE : Optional[int] = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=lowerCamelCase_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE : Tuple = negative_prompt_embeds.shape[1]
SCREAMING_SNAKE_CASE : Tuple = negative_prompt_embeds.repeat(1 , lowerCamelCase_ , 1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , lowerCamelCase_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE : Dict = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self : List[Any] , lowerCamelCase_ : Union[str, List[str]] , lowerCamelCase_ : int = 1_00 , lowerCamelCase_ : float = 5.0 , lowerCamelCase_ : float = 1.0 , lowerCamelCase_ : int = 1 , lowerCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase_ : Optional[torch.FloatTensor] = None , lowerCamelCase_ : Optional[str] = "pil" , lowerCamelCase_ : bool = True , lowerCamelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase_ : int = 1 , ):
'''simple docstring'''
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = 1
elif isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : List[Any] = len(lowerCamelCase_ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase_ )}''' )
SCREAMING_SNAKE_CASE : Tuple = batch_size * num_images_per_prompt
SCREAMING_SNAKE_CASE : Dict = guidance_scale > 1.0
SCREAMING_SNAKE_CASE : str = self._encode_prompt(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase_ , lowerCamelCase_ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(lowerCamelCase_ )}.''' )
# get the initial completely masked latents unless the user supplied it
SCREAMING_SNAKE_CASE : Optional[int] = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
SCREAMING_SNAKE_CASE : Tuple = self.transformer.num_vector_embeds - 1
SCREAMING_SNAKE_CASE : List[str] = torch.full(lowerCamelCase_ , lowerCamelCase_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
f''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
SCREAMING_SNAKE_CASE : Any = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowerCamelCase_ , device=self.device )
SCREAMING_SNAKE_CASE : List[str] = self.scheduler.timesteps.to(self.device )
SCREAMING_SNAKE_CASE : Optional[int] = latents
for i, t in enumerate(self.progress_bar(lowerCamelCase_ ) ):
# expand the sample if we are doing classifier free guidance
SCREAMING_SNAKE_CASE : List[Any] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
SCREAMING_SNAKE_CASE : Dict = self.transformer(lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , timestep=lowerCamelCase_ ).sample
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = model_output.chunk(2 )
SCREAMING_SNAKE_CASE : Optional[int] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(lowerCamelCase_ , dim=1 , keepdim=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.truncate(lowerCamelCase_ , lowerCamelCase_ )
# remove `log(0)`'s (`-inf`s)
SCREAMING_SNAKE_CASE : int = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE : Dict = self.scheduler.step(lowerCamelCase_ , timestep=lowerCamelCase_ , sample=lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.vqvae.config.vq_embed_dim
SCREAMING_SNAKE_CASE : Dict = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
SCREAMING_SNAKE_CASE : int = self.vqvae.quantize.get_codebook_entry(lowerCamelCase_ , shape=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.vqvae.decode(lowerCamelCase_ , force_not_quantize=lowerCamelCase_ ).sample
SCREAMING_SNAKE_CASE : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE : Optional[Any] = self.numpy_to_pil(lowerCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : float ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = torch.sort(lowerCamelCase_ , 1 , descending=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.exp(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
SCREAMING_SNAKE_CASE : Optional[int] = torch.full_like(keep_mask[:, 0:1, :] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = torch.cat((all_true, keep_mask) , dim=1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = keep_mask[:, :-1, :]
SCREAMING_SNAKE_CASE : Any = keep_mask.gather(1 , indices.argsort(1 ) )
SCREAMING_SNAKE_CASE : List[Any] = log_p_x_0.clone()
SCREAMING_SNAKE_CASE : List[Any] = -torch.inf # -inf = log(0)
return rv
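The `truncate` method above implements truncated sampling in log space: keep the most probable codebook entries whose cumulative mass stays under `truncation_rate`, always retain the argmax, and send everything else to log(0) = -inf. A tiny numeric sketch of that logic with illustrative probabilities:
import torch
# Three classes with probabilities 0.6 / 0.3 / 0.1 for one pixel; with a
# truncation rate of 0.8 only the first two survive.
log_p = torch.log(torch.tensor([[[0.6], [0.3], [0.1]]]))  # (batch, classes, pixels)
rate = 0.8
sorted_lp, idx = torch.sort(log_p, 1, descending=True)
keep = torch.exp(sorted_lp).cumsum(dim=1) < rate
keep = torch.cat((torch.ones_like(keep[:, :1, :]), keep[:, :-1, :]), dim=1)  # keep argmax
keep = keep.gather(1, idx.argsort(1))
out = torch.where(keep, log_p, torch.full_like(log_p, float("-inf")))
print(out[:, :, 0])  # [log 0.6, log 0.3, -inf]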
| 323 |
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (CMStochasticIterativeScheduler,)
SCREAMING_SNAKE_CASE__ = 10
def lowerCamelCase_ ( self : List[str] , **lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = {
"""num_train_timesteps""": 2_01,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
config.update(**lowerCamelCase_ )
return config
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = 10
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0](**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = scheduler.timesteps[0]
SCREAMING_SNAKE_CASE : Dict = scheduler.timesteps[1]
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample
SCREAMING_SNAKE_CASE : List[str] = 0.1 * sample
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = 1
scheduler.set_timesteps(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = scheduler.timesteps
SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(lowerCamelCase_ ):
# 1. scale model input
SCREAMING_SNAKE_CASE : Optional[int] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict noise residual
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , lowerCamelCase_ )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = pred_prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 192.7_614 ) < 1e-2
assert abs(result_mean.item() - 0.2_510 ) < 1e-3
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = [1_06, 0]
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = scheduler.timesteps
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict noise residual
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lowerCamelCase_ )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE : str = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Dict = pred_prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 347.6_357 ) < 1e-2
assert abs(result_mean.item() - 0.4_527 ) < 1e-3
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [39, 30, 12, 15, 0]
with self.assertRaises(lowerCamelCase_ , msg="""`timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = [39, 30, 12, 1, 0]
SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ )
with self.assertRaises(lowerCamelCase_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ , timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Any = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
lowerCamelCase_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
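For reference, a minimal standalone sampling loop with the scheduler under test; the config mirrors the fixture above, and random noise stands in for a real consistency model, so the output is meaningless but the call sequence is the one the tests exercise.
import torch
from diffusers import CMStochasticIterativeScheduler
scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
scheduler.set_timesteps(timesteps=[106, 0])
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    model_output = torch.randn_like(model_input)  # stand-in for the model
    sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample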
| 323 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[str] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[Any]=7 , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Any=18 , lowerCamelCase_ : Tuple=30 , lowerCamelCase_ : Optional[Any]=4_00 , lowerCamelCase_ : Dict=True , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : Dict=None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {"""shortest_edge""": 20}
SCREAMING_SNAKE_CASE : List[Any] = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
SCREAMING_SNAKE_CASE : int = parent
SCREAMING_SNAKE_CASE : List[str] = batch_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : List[Any] = min_resolution
SCREAMING_SNAKE_CASE : Tuple = max_resolution
SCREAMING_SNAKE_CASE : Tuple = do_resize
SCREAMING_SNAKE_CASE : List[str] = size
SCREAMING_SNAKE_CASE : List[str] = do_center_crop
SCREAMING_SNAKE_CASE : List[Any] = crop_size
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = MobileNetVaImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaImageProcessingTester(self )
@property
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """crop_size""" ) )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : str = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : int = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 323 |
'''simple docstring'''
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : NestedDataStructureLike[PathLike] , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
lowerCamelCase_ , split=lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_ , streaming=lowerCamelCase_ , num_proc=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : int = path_or_paths if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else {self.split: path_or_paths}
SCREAMING_SNAKE_CASE : Optional[int] = Text(
cache_dir=lowerCamelCase_ , data_files=lowerCamelCase_ , features=lowerCamelCase_ , **lowerCamelCase_ , )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.streaming:
SCREAMING_SNAKE_CASE : int = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[str] = None
self.builder.download_and_prepare(
download_config=lowerCamelCase_ , download_mode=lowerCamelCase_ , verification_mode=lowerCamelCase_ , base_path=lowerCamelCase_ , num_proc=self.num_proc , )
SCREAMING_SNAKE_CASE : int = self.builder.as_dataset(
split=self.split , verification_mode=lowerCamelCase_ , in_memory=self.keep_in_memory )
return dataset
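End users normally reach this reader through `load_dataset`; a short hedged sketch (the file path is hypothetical).
from datasets import load_dataset
# Each line of my_corpus.txt becomes one example with a single "text" column.
ds = load_dataset("text", data_files={"train": "my_corpus.txt"}, split="train")
print(ds[0]["text"])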
| 323 | 1 |
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__UpperCAmelCase = {
"""<""": operator.lt,
"""<=""": operator.le,
"""==""": operator.eq,
"""!=""": operator.ne,
""">=""": operator.ge,
""">""": operator.gt,
}
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if got_ver is None or want_ver is None:
raise ValueError(
f'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
f''' reinstalling {pkg}.''' )
if not ops[op](version.parse(lowerCamelCase_ ) , version.parse(lowerCamelCase_ ) ):
raise ImportError(
f'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def __A ( lowerCamelCase_ , lowerCamelCase_ = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = f'''\n{hint}''' if hint is not None else """"""
# non-versioned check
if re.match(R"""^[\w_\-\d]+$""" , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = requirement, None, None
else:
SCREAMING_SNAKE_CASE : Any = re.findall(R"""^([^!=<>\s]+)([\s!=<>]{1,2}.+)""" , lowerCamelCase_ )
if not match:
raise ValueError(
"""requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"""
f''' got {requirement}''' )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = match[0]
SCREAMING_SNAKE_CASE : Tuple = want_full.split(""",""" ) # there could be multiple requirements
SCREAMING_SNAKE_CASE : List[str] = {}
for w in want_range:
SCREAMING_SNAKE_CASE : int = re.findall(R"""^([\s!=<>]{1,2})(.+)""" , lowerCamelCase_ )
if not match:
raise ValueError(
"""requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"""
f''' but got {requirement}''' )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = match[0]
SCREAMING_SNAKE_CASE : Tuple = want_ver
if op not in ops:
raise ValueError(f'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
# special case
if pkg == "python":
SCREAMING_SNAKE_CASE : str = """.""".join([str(lowerCamelCase_ ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return
# check if any version is installed
try:
SCREAMING_SNAKE_CASE : List[str] = importlib.metadata.version(lowerCamelCase_ )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = """Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"""
return require_version(lowerCamelCase_ , lowerCamelCase_ )
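Typical requirement strings the parser above accepts, shown through the public entry point this module maps to in transformers (the original names behind the obfuscated `__A`s are assumed to be `require_version` and `require_version_core`).
from transformers.utils.versions import require_version
require_version("tokenizers>=0.11.1,!=0.11.3,<0.13")      # comma-separated multi-op range
require_version("python>=3.8")                            # special-cased interpreter check
require_version("numpy", hint="Try: pip install numpy")   # bare existence check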
| 323 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = 3_84
SCREAMING_SNAKE_CASE : Union[str, Any] = 7
if "tiny" in model_name:
SCREAMING_SNAKE_CASE : List[str] = 96
SCREAMING_SNAKE_CASE : List[str] = (2, 2, 6, 2)
SCREAMING_SNAKE_CASE : List[Any] = (3, 6, 12, 24)
elif "small" in model_name:
SCREAMING_SNAKE_CASE : Any = 96
SCREAMING_SNAKE_CASE : List[str] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : int = (3, 6, 12, 24)
elif "base" in model_name:
SCREAMING_SNAKE_CASE : int = 1_28
SCREAMING_SNAKE_CASE : Any = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : int = (4, 8, 16, 32)
SCREAMING_SNAKE_CASE : Optional[Any] = 12
SCREAMING_SNAKE_CASE : str = 5_12
elif "large" in model_name:
SCREAMING_SNAKE_CASE : Tuple = 1_92
SCREAMING_SNAKE_CASE : Tuple = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : List[str] = (6, 12, 24, 48)
SCREAMING_SNAKE_CASE : Tuple = 12
SCREAMING_SNAKE_CASE : Union[str, Any] = 7_68
# set label information
SCREAMING_SNAKE_CASE : List[str] = 1_50
SCREAMING_SNAKE_CASE : Optional[Any] = """huggingface/label-files"""
SCREAMING_SNAKE_CASE : List[str] = """ade20k-id2label.json"""
SCREAMING_SNAKE_CASE : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE : str = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Optional[Any] = SwinConfig(
embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , num_heads=lowerCamelCase_ , window_size=lowerCamelCase_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
SCREAMING_SNAKE_CASE : List[str] = UperNetConfig(
backbone_config=lowerCamelCase_ , auxiliary_in_channels=lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , )
return config
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = dct.pop(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = val
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
SCREAMING_SNAKE_CASE : Dict = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE : int = in_proj_weight[:dim, :]
SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[: dim]
SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE : Any = in_proj_bias[
dim : dim * 2
]
SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[
-dim :, :
]
SCREAMING_SNAKE_CASE : str = in_proj_bias[-dim :]
# fmt: on
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = x.shape
SCREAMING_SNAKE_CASE : Any = x.reshape(lowerCamelCase_ , 4 , in_channel // 4 )
SCREAMING_SNAKE_CASE : Any = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = x.shape
SCREAMING_SNAKE_CASE : Dict = x.reshape(lowerCamelCase_ , in_channel // 4 , 4 )
SCREAMING_SNAKE_CASE : str = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = x.shape[0]
SCREAMING_SNAKE_CASE : List[str] = x.reshape(4 , in_channel // 4 )
SCREAMING_SNAKE_CASE : str = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = x.shape[0]
SCREAMING_SNAKE_CASE : Optional[int] = x.reshape(in_channel // 4 , 4 )
SCREAMING_SNAKE_CASE : str = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
SCREAMING_SNAKE_CASE : List[str] = model_name_to_url[model_name]
SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="""cpu""" , file_name=lowerCamelCase_ )[
"""state_dict"""
]
for name, param in state_dict.items():
print(lowerCamelCase_ , param.shape )
SCREAMING_SNAKE_CASE : Dict = get_upernet_config(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetForSemanticSegmentation(lowerCamelCase_ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(lowerCamelCase_ )
if "bn" in key:
SCREAMING_SNAKE_CASE : List[str] = key.replace("""bn""" , """batch_norm""" )
SCREAMING_SNAKE_CASE : Optional[Any] = val
# rename keys
SCREAMING_SNAKE_CASE : Union[str, Any] = create_rename_keys(lowerCamelCase_ )
for src, dest in rename_keys:
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
read_in_q_k_v(lowerCamelCase_ , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
SCREAMING_SNAKE_CASE : Tuple = reverse_correct_unfold_reduction_order(lowerCamelCase_ )
if "norm" in key:
SCREAMING_SNAKE_CASE : Optional[int] = reverse_correct_unfold_norm_order(lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
# verify on image
SCREAMING_SNAKE_CASE : Optional[int] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("""RGB""" )
SCREAMING_SNAKE_CASE : Optional[int] = SegformerImageProcessor()
SCREAMING_SNAKE_CASE : str = processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = outputs.logits
print(logits.shape )
print("""First values of logits:""" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] )
elif model_name == "upernet-swin-small":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] )
elif model_name == "upernet-swin-base":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] )
elif model_name == "upernet-swin-large":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase_ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowerCamelCase_ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCAmelCase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 323 | 1 |
'''simple docstring'''
from __future__ import annotations
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = str(lowerCamelCase_ )
return len(lowerCamelCase_ ) == 9 and set(lowerCamelCase_ ) == set("""123456789""" )
def __A ( ):
"""simple docstring"""
for base_num in range(99_99 , 49_99 , -1 ):
SCREAMING_SNAKE_CASE : Tuple = 10_00_02 * base_num
if is_9_pandigital(lowerCamelCase_ ):
return candidate
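    # For a 3-digit base b (2*b and 3*b both 3 digits), b * 1002003 == int(str(b) + str(2 * b) + str(3 * b)),
    # the concatenated product of b with (1, 2, 3); e.g. 192 * 1002003 == 192384576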
for base_num in range(3_33 , 99 , -1 ):
SCREAMING_SNAKE_CASE : str = 1_00_20_03 * base_num
if is_9_pandigital(lowerCamelCase_ ):
return candidate
return None
if __name__ == "__main__":
print(f'''{solution() = }''')
| 323 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class UpperCamelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Dict[str, int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int = None , lowerCamelCase_ : int = None ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Any = pad_token_id
SCREAMING_SNAKE_CASE : List[Any] = max_length
SCREAMING_SNAKE_CASE : Optional[int] = vocab
SCREAMING_SNAKE_CASE : List[Any] = merges
SCREAMING_SNAKE_CASE : Tuple = BytePairTokenizer(lowerCamelCase_ , lowerCamelCase_ , sequence_length=lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : Any , lowerCamelCase_ : GPTaTokenizer , *lowerCamelCase_ : str , **lowerCamelCase_ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = [""" """.join(lowerCamelCase_ ) for m in tokenizer.bpe_ranks.keys()]
SCREAMING_SNAKE_CASE : List[str] = tokenizer.get_vocab()
return cls(lowerCamelCase_ , lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : List[Any] , lowerCamelCase_ : Union[str, os.PathLike] , *lowerCamelCase_ : str , **lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = GPTaTokenizer.from_pretrained(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
return cls.from_tokenizer(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls : List[str] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
return cls(**lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : int = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tf_tokenizer(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = tf.ones_like(lowerCamelCase_ )
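        # start from an all-ones mask; pad_model_inputs below returns both the padded ids and a matching mask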
if self.pad_token_id is not None:
# pad the tokens up to max length
SCREAMING_SNAKE_CASE : Optional[int] = max_length if max_length is not None else self.max_length
if max_length is not None:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = pad_model_inputs(
lowerCamelCase_ , max_seq_length=lowerCamelCase_ , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 323 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''deberta-v2'''
def __init__( self : Any , lowerCamelCase_ : int=12_81_00 , lowerCamelCase_ : Optional[int]=15_36 , lowerCamelCase_ : int=24 , lowerCamelCase_ : Union[str, Any]=24 , lowerCamelCase_ : Any=61_44 , lowerCamelCase_ : List[Any]="gelu" , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : Optional[Any]=5_12 , lowerCamelCase_ : List[str]=0 , lowerCamelCase_ : Dict=0.02 , lowerCamelCase_ : Tuple=1e-7 , lowerCamelCase_ : str=False , lowerCamelCase_ : List[str]=-1 , lowerCamelCase_ : Union[str, Any]=0 , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : Dict=0 , lowerCamelCase_ : int="gelu" , **lowerCamelCase_ : Dict , ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE : str = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = relative_attention
SCREAMING_SNAKE_CASE : str = max_relative_positions
SCREAMING_SNAKE_CASE : int = pad_token_id
SCREAMING_SNAKE_CASE : List[Any] = position_biased_input
# Backwards compatibility
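        # pos_att_type may arrive as a "|"-separated string (e.g. "p2c|c2p"); normalize it to a lowercase list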
if type(lowerCamelCase_ ) == str:
SCREAMING_SNAKE_CASE : Tuple = [x.strip() for x in pos_att_type.lower().split("""|""" )]
SCREAMING_SNAKE_CASE : int = pos_att_type
SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE : int = kwargs.get("""pooler_hidden_size""" , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = pooler_dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = pooler_hidden_act
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
@property
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : int = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE : str = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
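        # default ONNX opset version used when exporting this architecture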
return 12
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowerCamelCase_ : int = -1 , lowerCamelCase_ : int = -1 , lowerCamelCase_ : int = -1 , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional["TensorType"] = None , lowerCamelCase_ : int = 3 , lowerCamelCase_ : int = 40 , lowerCamelCase_ : int = 40 , lowerCamelCase_ : "PreTrainedTokenizerBase" = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = super().generate_dummy_inputs(preprocessor=lowerCamelCase_ , framework=lowerCamelCase_ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 323 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase_ : Optional[NestedDataStructureLike[PathLike]] = None , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Optional[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = path_or_paths
SCREAMING_SNAKE_CASE : List[Any] = split if split or isinstance(lowerCamelCase_ , lowerCamelCase_ ) else """train"""
SCREAMING_SNAKE_CASE : List[Any] = features
SCREAMING_SNAKE_CASE : Union[str, Any] = cache_dir
SCREAMING_SNAKE_CASE : Dict = keep_in_memory
SCREAMING_SNAKE_CASE : Union[str, Any] = streaming
SCREAMING_SNAKE_CASE : Optional[int] = num_proc
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs
@abstractmethod
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
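        # concrete readers implement this to build and return the dataset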
pass
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : str , lowerCamelCase_ : Optional[Features] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Tuple , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = features
SCREAMING_SNAKE_CASE : int = cache_dir
SCREAMING_SNAKE_CASE : Dict = keep_in_memory
SCREAMING_SNAKE_CASE : Tuple = streaming
SCREAMING_SNAKE_CASE : Union[str, Any] = num_proc
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs
@abstractmethod
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
pass
| 323 | 1 |
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''new-model'''
if is_tf_available():
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = NewModelConfig
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = """bert-base-cased"""
SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = TFAutoModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
@slow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = """bert-base-cased"""
SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = TFAutoModelForPreTraining.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = TFAutoModelForCausalLM.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = TFAutoModelForCausalLM.from_pretrained(lowerCamelCase_ , output_loading_info=lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
@slow
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
@slow
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase_ , output_loading_info=lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
@slow
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[int] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase_ , output_loading_info=lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
@slow
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = TFAutoModelForSequenceClassification.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
@slow
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = TFAutoModelForQuestionAnswering.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
@slow
@require_tensorflow_probability
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = TFAutoModelForTableQuestionAnswering.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained(
lowerCamelCase_ , output_loading_info=lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=lowerCamelCase_ ) , 1_44_10 )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=lowerCamelCase_ ) , 1_44_10 )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = TFAutoModel.from_pretrained("""sgugger/funnel-random-tiny""" )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = copy.deepcopy(model.config )
SCREAMING_SNAKE_CASE : Any = ["""FunnelBaseModel"""]
SCREAMING_SNAKE_CASE : Optional[int] = TFAutoModel.from_config(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = TFAutoModel.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
try:
AutoConfig.register("""new-model""" , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(lowerCamelCase_ ):
auto_class.register(lowerCamelCase_ , lowerCamelCase_ )
auto_class.register(lowerCamelCase_ , lowerCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase_ ):
auto_class.register(lowerCamelCase_ , lowerCamelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE : Dict = BertModelTester(self ).get_config()
SCREAMING_SNAKE_CASE : List[Any] = NewModelConfig(**tiny_config.to_dict() )
SCREAMING_SNAKE_CASE : str = auto_class.from_config(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = auto_class.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
finally:
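            # remove the test registrations so the global auto-mappings stay clean for other tests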
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ):
SCREAMING_SNAKE_CASE : List[str] = TFAutoModel.from_pretrained("""bert-base""" )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase_ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
SCREAMING_SNAKE_CASE : Dict = TFAutoModel.from_pretrained(lowerCamelCase_ , revision="""aaaaaa""" )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin""" , ):
SCREAMING_SNAKE_CASE : Optional[int] = TFAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
with self.assertRaisesRegex(lowerCamelCase_ , """Use `from_pt=True` to load this model""" ):
SCREAMING_SNAKE_CASE : Optional[int] = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
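        # with a warm cache, reloading should trigger a single HEAD request (revalidation) and nothing else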
with RequestCounter() as counter:
SCREAMING_SNAKE_CASE : Any = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
SCREAMING_SNAKE_CASE : Union[str, Any] = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
with RequestCounter() as counter:
SCREAMING_SNAKE_CASE : Tuple = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 323 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = """ylacombe/bark-small"""
SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : str = """en_speaker_1"""
SCREAMING_SNAKE_CASE : Optional[int] = """This is a test string"""
SCREAMING_SNAKE_CASE : Optional[int] = """speaker_embeddings_path.json"""
SCREAMING_SNAKE_CASE : List[Any] = """speaker_embeddings"""
def lowerCamelCase_ ( self : int , **lowerCamelCase_ : int ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[str] = BarkProcessor(tokenizer=lowerCamelCase_ )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
SCREAMING_SNAKE_CASE : int = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
SCREAMING_SNAKE_CASE : List[str] = 35
SCREAMING_SNAKE_CASE : List[Any] = 2
SCREAMING_SNAKE_CASE : int = 8
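        # a voice preset bundles a semantic prompt plus coarse (2 codebooks) and fine (8 codebooks) prompt arrays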
SCREAMING_SNAKE_CASE : Optional[int] = {
"""semantic_prompt""": np.ones(lowerCamelCase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
SCREAMING_SNAKE_CASE : Tuple = processor(text=self.input_string , voice_preset=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = processor(text=self.input_string , voice_preset=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
SCREAMING_SNAKE_CASE : Optional[Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Any = BarkProcessor(tokenizer=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string )
SCREAMING_SNAKE_CASE : Tuple = tokenizer(
self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 323 | 1 |
'''simple docstring'''
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
__UpperCAmelCase = input("""Enter image url: """).strip()
print(f'''Downloading image from {url} ...''')
__UpperCAmelCase = BeautifulSoup(requests.get(url).content, """html.parser""")
# The image URL is in the content field of the first meta tag with property og:image
__UpperCAmelCase = soup.find("""meta""", {"""property""": """og:image"""})["""content"""]
__UpperCAmelCase = requests.get(image_url).content
__UpperCAmelCase = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
with open(file_name, """wb""") as fp:
fp.write(image_data)
print(f'''Done. Image saved to disk as {file_name}.''')
| 323 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCAmelCase = logging.getLogger(__name__)
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return (preds == labels).mean()
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
SCREAMING_SNAKE_CASE__ = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
SCREAMING_SNAKE_CASE__ = field(
default=lowercase_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def __A ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase_ )
# Set seed
set_seed(training_args.seed )
try:
SCREAMING_SNAKE_CASE : Dict = processors[data_args.task_name]()
SCREAMING_SNAKE_CASE : Optional[int] = processor.get_labels()
SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase_ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase_ , cache_dir=model_args.cache_dir , )
# Get datasets
SCREAMING_SNAKE_CASE : Optional[Any] = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
SCREAMING_SNAKE_CASE : Dict = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(lowerCamelCase_ ) -> Dict:
SCREAMING_SNAKE_CASE : str = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(lowerCamelCase_ , p.label_ids )}
# Data collator
SCREAMING_SNAKE_CASE : List[Any] = DataCollatorWithPadding(lowerCamelCase_ , pad_to_multiple_of=8 ) if training_args.fpaa else None
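    # pad_to_multiple_of=8 keeps sequence lengths aligned for efficient fp16 tensor-core kernels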
# Initialize our Trainer
SCREAMING_SNAKE_CASE : Any = Trainer(
model=lowerCamelCase_ , args=lowerCamelCase_ , train_dataset=lowerCamelCase_ , eval_dataset=lowerCamelCase_ , compute_metrics=lowerCamelCase_ , data_collator=lowerCamelCase_ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE : Optional[Any] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
SCREAMING_SNAKE_CASE : Optional[Any] = trainer.evaluate()
SCREAMING_SNAKE_CASE : str = os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_master():
with open(lowerCamelCase_ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , lowerCamelCase_ , lowerCamelCase_ )
writer.write("""%s = %s\n""" % (key, value) )
results.update(lowerCamelCase_ )
return results
def __A ( lowerCamelCase_ ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 323 | 1 |