| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (81–54k chars) | int64 (0–721) | string (91–41.9k chars) | int64 (0–699) | int64 (0–1) |
"""Minimal path sum through a square grid, moving only right and down (Project Euler problem 81)."""
import os


def solution(filename: str = "matrix.txt") -> int:
    """Return the minimal path sum from the top-left to the bottom-right cell
    of the grid stored in `filename`, moving only right and down."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]

    # The first row and first column can each be reached from only one direction.
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    # Every other cell is entered from above or from the left, whichever is cheaper.
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]


if __name__ == "__main__":
    print(f"{solution() = }")
"""Utility to prepare a release of the 🤗 Transformers library: bump the version
in every file that hardcodes it, and clean up the README model list."""
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file, using the regex registered for `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the minimum version pinned by every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from the main doc to the stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the library __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps: bump the version everywhere."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps: move back to a dev version."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
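The version bump itself is just an anchored `re.MULTILINE` substitution; a minimal in-memory sketch of what `update_version_in_file` does with the `"init"` pattern (no file I/O, values are made up for illustration):

```python
import re

re_pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
code = '__version__ = "4.26.0.dev0"\n'
# Drop the .dev0 suffix the same way pre_release_work would.
print(re_pattern.sub('__version__ = "4.26.0"\n', code))
```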
"""Sieve of Eratosthenes: list all prime numbers up to a given bound."""


def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return all primes in [2, num] using the sieve of Eratosthenes.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            # Mark every multiple of p, starting at p * p, as composite.
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
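Because marking starts at `p * p`, each composite is crossed out by its smallest prime factor first, and the whole pass costs O(n log log n). A quick check of the function above against the primes below 30:

```python
assert prime_sieve_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
```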
"""Counting sort: a stable, non-comparison sort for integer keys."""


def counting_sort(collection: list[int]) -> list[int]:
    """Sort a list of integers with counting sort and return a new list."""
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how many times each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string: str) -> str:
    """Sort the characters of a string by applying counting sort to their code points."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
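Because indices are offset by `coll_min`, negative keys work too, and the backwards placement pass keeps equal keys in their original order (stability). Two small checks of the functions above:

```python
assert counting_sort([5, -3, 0, 2, -3]) == [-3, -3, 0, 2, 5]
assert counting_sort_string("bca") == "abc"
```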
"""Find the first run of consecutive integers that all have the same number of
distinct prime factors (Project Euler problem 47)."""
from functools import lru_cache


def unique_prime_factors(n: int) -> set[int]:
    """Return the set of distinct prime factors of n by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of the distinct prime factors of num."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Return True if every element of the iterable is equal (or it is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list[int]:
    """Return the first n consecutive integers that each have n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4):
    """Return the first member of the first group of n consecutive integers
    with n distinct prime factors each."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
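For smaller targets the same search is easy to verify by hand: the first pair of consecutive integers with two distinct prime factors each is (14, 15) = (2·7, 3·5). Quick checks of the functions above:

```python
assert solution(2) == 14
assert unique_prime_factors(644) == {2, 7, 23}  # 644 = 2**2 * 7 * 23
```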
"""Fetch the current stock price of a ticker symbol from Yahoo Finance."""
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the quoted price for `symbol` from its Yahoo Finance quote page."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
"""Chinese remainder theorem: solve x ≡ r1 (mod n1), x ≡ r2 (mod n2) for coprime n1, n2."""
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a*x + b*y = gcd(a, b).

    >>> extended_euclid(10, 6)
    (-1, 2)
    """
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Return the unique x modulo n1*n2 with x ≡ r1 (mod n1) and x ≡ r2 (mod n2)."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same result as chinese_remainder_theorem, built from modular inverses instead."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
"""Non-preemptive shortest-job-first scheduling: compute per-process waiting
and turnaround times."""
from __future__ import annotations

from statistics import mean


def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes

    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []

    completed = 0
    total_time = 0

    # While processes are not completed: a process whose arrival time has
    # passed and which has remaining execution time is put into ready_process,
    # and the shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1

    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Calculate the turnaround time of each process: burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
"""Hamming distance: the number of positions at which two equal-length strings differ."""


def hamming_distance(string1: str, string2: str) -> int:
    """Return the number of positions where string1 and string2 differ.

    >>> hamming_distance("python", "pithon")
    1
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0

    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1

    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
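For strings of binary digits the same count equals the number of set bits in the XOR of the two values, which gives an easy cross-check:

```python
assert hamming_distance("karolin", "kathrin") == 3

# Equivalent bitwise view for binary strings:
a, b = int("0110", 2), int("1100", 2)
assert bin(a ^ b).count("1") == hamming_distance("0110", "1100") == 2
```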
"""Placeholder ("dummy") objects that stand in for torch-backed classes when
torch is not installed: any attempt to instantiate or load one raises a clear
backend error via `requires_backends`.

The original file declares one such class per torch-backed object, every one
with the identical body below; the real class and method names were stripped
from this copy and are not recoverable, so the repeated pattern is shown once.
"""
from ..utils import DummyObject, requires_backends


class TorchBackedPlaceholder(metaclass=DummyObject):
    # Placeholder name: the original class names are not recoverable here.
    # The two classmethod names are assumptions, following the usual
    # from_config / from_pretrained pair guarded by this pattern.
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


def torch_backed_function(*args, **kwargs):
    # The module also guards several top-level functions the same way; their
    # names were stripped too, so one representative is shown.
    requires_backends(torch_backed_function, ["torch"])
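A self-contained sketch of how this pattern behaves at runtime, with stand-ins for `DummyObject` and `requires_backends` (whose real implementations live in the package's `utils` module; the stand-ins below are assumptions for illustration only):

```python
class DummyObject(type):
    # Metaclass sketch: class-level attribute access fails fast without torch.
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the torch backend.")


def requires_backends(obj, backends):
    raise ImportError(f"{obj} requires the following backends: {backends}")


class Placeholder(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


try:
    Placeholder()
except ImportError as e:
    print(e)  # raised immediately, instead of a confusing AttributeError later
```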
a_ = ['''torch''']
def __init__( self : Any , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : str , **__a : Any ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Dict , **__a : Tuple ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Any , **__a : Optional[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : int , **__a : List[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Tuple ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : int , **__a : Optional[Any] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Optional[int] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[str] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : int , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : List[str] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
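# The classes above are auto-generated placeholder ("dummy") objects: each one
# declares a torch backend requirement, so constructing it or calling its
# `from_config` / `from_pretrained`-style classmethods without PyTorch
# installed raises an informative ImportError via `requires_backends` instead
# of a confusing failure later on.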
| 654 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImg2ImgPipeline # noqa: F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 654 | 1 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def stock_price ( symbol : str = "AAPL" ):
    """simple docstring"""
    url = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
    class_ = '''My(6px) Pos(r) smartphone_Mt(6px)'''
    return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 654 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
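# Sketch of what the _LazyModule indirection buys (hypothetical session, not
# part of this file): the top-level import stays cheap because submodules are
# only materialized on first attribute access.
#
#   from transformers.models.llama import LlamaConfig   # no heavy imports yet
#   config = LlamaConfig()                               # triggers the real load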
| 654 | 1 |
'''simple docstring'''
def combination_util ( arr , n , r , index , data , i ):
    """simple docstring"""
    if index == r:
        for j in range(r ):
            print(data[j] , end=''' ''' )
        print(''' ''' )
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr , n , r , index + 1 , data , i + 1 )
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr , n , r , index , data , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination ( arr , n , r ):
    """simple docstring"""
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr , n , r , 0 , data , 0 )
if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
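# Expected output of the driver code above (all C(5, 3) = 10 combinations):
#   10 20 30
#   10 20 40
#   10 20 50
#   10 30 40
#   10 30 50
#   10 40 50
#   20 30 40
#   20 30 50
#   20 40 50
#   30 40 50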
| 654 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds ( train_file : str , eval_file : str , test_file : str , tokenizer : PreTrainedTokenizer , label_column_id : int , max_seq_length : Optional[int] = None , ):
    """simple docstring"""
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset('''csv''' , data_files=files )
    features_name = list(ds[list(files.keys() )[0]].features.keys() )
    label_name = features_name.pop(label_column_id )
    label_list = list(set(ds[list(files.keys() )[0]][label_name] ) )
    label2id = {label: i for i, label in enumerate(label_list )}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name ) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=True , max_length=max_seq_length , padding='''max_length''' ) , batched=True , )
    elif len(features_name ) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) , truncation=True , max_length=max_seq_length , padding='''max_length''' , ) , batched=True , )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments :
    label_column_id: int = field(metadata={'''help''': '''Which column contains the label'''} )
    train_file: str = field(default=None , metadata={'''help''': '''The path of the training file'''} )
    dev_file: Optional[str] = field(default=None , metadata={'''help''': '''The path of the development file'''} )
    test_file: Optional[str] = field(default=None , metadata={'''help''': '''The path of the test file'''} )
    max_seq_length: int = field(
        default=1_2_8 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class ModelArguments :
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    use_fast: bool = field(default=False , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def main ( ):
    """simple docstring"""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            ''' --overwrite_output_dir to overcome.''' )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
    logger.info(
        f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
        f"""16-bits training: {training_args.fp16}""" )
    logger.info(f"""Training/evaluation parameters {training_args}""" )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    train_dataset , eval_dataset , test_ds , label2id = get_tfds(
        train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=tokenizer , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(label2id ) , label2id=label2id , id2label={id: label for label, id in label2id.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    def compute_metrics(p : EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": (preds == p.label_ids).mean()}
    # Initialize our Trainer
    trainer = TFTrainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , )
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        with open(output_eval_file , '''w''' ) as writer:
            logger.info('''***** Eval results *****''' )
            for key, value in result.items():
                logger.info(f""" {key} = {value}""" )
                writer.write(f"""{key} = {value}\n""" )
        results.update(result )
    return results
if __name__ == "__main__":
    main()
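# Illustrative invocation (the script and CSV file names are assumptions; the
# flag names come from the dataclass fields above):
#
#   python run_tf_text_classification.py \
#     --model_name_or_path distilbert-base-uncased \
#     --train_file train.csv --dev_file dev.csv \
#     --label_column_id 0 --max_seq_length 128 \
#     --output_dir ./model --do_train --do_eval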
| 654 | 1 |
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset ( dataset , expected_features ):
    """simple docstring"""
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_dataset_from_parquet_keep_in_memory ( keep_in_memory , parquet_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_parquet_dataset(dataset , expected_features )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_dataset_from_parquet_features ( features , parquet_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path , features=features , cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset , expected_features )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_dataset_from_parquet_split ( split , parquet_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , split=split ).read()
    _check_parquet_dataset(dataset , expected_features )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def test_dataset_from_parquet_path_type ( path_type , parquet_path , tmp_path ):
    """simple docstring"""
    if issubclass(path_type , str ):
        path = parquet_path
    elif issubclass(path_type , list ):
        path = [parquet_path]
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = ParquetDatasetReader(path , cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset , expected_features )
def _check_parquet_datasetdict ( dataset_dict , expected_features , splits=("train",) ):
    """simple docstring"""
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_parquet_datasetdict_reader_keep_in_memory ( keep_in_memory , parquet_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {'''train''': parquet_path} , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_parquet_datasetdict(dataset , expected_features )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_parquet_datasetdict_reader_features ( features , parquet_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader({'''train''': parquet_path} , features=features , cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset , expected_features )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_parquet_datasetdict_reader_split ( split , parquet_path , tmp_path ):
    """simple docstring"""
    if split:
        path = {split: parquet_path}
    else:
        split = '''train'''
        path = {'''train''': parquet_path, '''test''': parquet_path}
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = ParquetDatasetReader(path , cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset , expected_features , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
def test_parquet_write ( dataset , tmp_path ):
    """simple docstring"""
    writer = ParquetDatasetWriter(dataset , tmp_path / '''foo.parquet''' )
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / '''foo.parquet''' )
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features ( shared_datadir , tmp_path ):
    """simple docstring"""
    image_path = str(shared_datadir / '''test_image_rgb.jpg''' )
    data = {'''image''': [image_path]}
    features = Features({'''image''': Image()} )
    dataset = Dataset.from_dict(data , features=features )
    writer = ParquetDatasetWriter(dataset , tmp_path / '''foo.parquet''' )
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=True ).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size ( feature , expected ):
    """simple docstring"""
    assert get_writer_batch_size(feature ) == expected
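# These parametrized tests are meant to be collected by pytest; the
# `parquet_path`, `dataset` and `shared_datadir` arguments are fixtures
# supplied by the surrounding test suite, e.g.:
#   pytest -q tests/io/test_parquet.py
# (the exact path is an assumption)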
| 654 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester :
    def __init__( self , parent , ):
        self.parent = parent
        self.batch_size = 1_3
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 9_9
        self.hidden_size = 3_2
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 3_7
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_1_2
        self.type_vocab_size = 1_6
        self.type_sequence_label_size = 2
        self.initializer_range = 0.0_2
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertModel(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_distilbert_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertForMaskedLM(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_distilbert_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertForQuestionAnswering(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_distilbert_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_distilbert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_distilbert_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': TFDistilBertModel,
            '''fill-mask''': TFDistilBertForMaskedLM,
            '''question-answering''': TFDistilBertForQuestionAnswering,
            '''text-classification''': TFDistilBertForSequenceClassification,
            '''token-classification''': TFDistilBertForTokenClassification,
            '''zero-shot''': TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFDistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=3_7 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_distilbert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
            model = TFDistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class TFDistilBertModelIntegrationTest ( unittest.TestCase ):
    @slow
    def test_output_embeds_base_model( self ):
        model = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 7_6_8]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
                    [0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
                    [0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
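# The integration test above checks a 3x3 slice of the base model's first
# hidden states; tests marked @slow only run when RUN_SLOW=1 is set, e.g.:
#   RUN_SLOW=1 pytest -q tests/models/distilbert/test_modeling_tf_distilbert.py
# (the exact path is an assumption)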
| 654 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}
SPIECE_UNDERLINE = "▁"
class CamembertTokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"] , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {'''<s>NOTUSED''': 0, '''<pad>''': 1, '''</s>NOTUSED''': 2, '''<unk>''': 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids )
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size( self ) -> int:
        return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text : str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token ) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
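# Hypothetical usage sketch (requires a real SentencePiece model file; the
# path below is an assumption):
#
#   tokenizer = CamembertTokenizer("sentencepiece.bpe.model")
#   ids = tokenizer("J'aime le camembert !")["input_ids"]
#   text = tokenizer.decode(ids)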
| 654 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 654 | 1 |
'''simple docstring'''
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "artists_file": "artists.json",
    "lyrics_file": "lyrics.json",
    "genres_file": "genres.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "artists_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
    },
    "genres_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
    },
    "lyrics_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
    },
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
    "jukebox": 512,
}
class JukeboxTokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , artists_file , genres_file , lyrics_file , version=["v3", "v2", "v2"] , max_n_lyric_tokens=5_1_2 , n_genres=5 , unk_token="<|endoftext|>" , **kwargs , ):
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        super().__init__(
            unk_token=unk_token , n_genres=n_genres , version=version , max_n_lyric_tokens=max_n_lyric_tokens , **kwargs , )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file , encoding='''utf-8''' ) as vocab_handle:
            self.artists_encoder = json.load(vocab_handle )
        with open(genres_file , encoding='''utf-8''' ) as vocab_handle:
            self.genres_encoder = json.load(vocab_handle )
        with open(lyrics_file , encoding='''utf-8''' ) as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle )
        oov = r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'''
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder ) == 7_9:
            oov = oov.replace(r'''\-\'''' , r'''\-+\'''' )
        self.out_of_vocab = regex.compile(oov )
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def vocab_size( self ):
        return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
    def get_vocab( self ):
        return {**self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder}
    def _convert_token_to_id( self , list_artists , list_genres , list_lyrics ):
        artists_id = [self.artists_encoder.get(artist , 0 ) for artist in list_artists]
        for genres in range(len(list_genres ) ):
            list_genres[genres] = [self.genres_encoder.get(genre , 0 ) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
        lyric_ids = [[self.lyrics_encoder.get(character , 0 ) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids
    def _tokenize( self , lyrics ):
        return list(lyrics )
    def tokenize( self , artist , genre , lyrics , **kwargs ):
        artist , genre , lyrics = self.prepare_for_tokenization(artist , genre , lyrics )
        lyrics = self._tokenize(lyrics )
        return artist, genre, lyrics
    def prepare_for_tokenization( self , artists : str , genres : str , lyrics : str , is_split_into_words : bool = False ) -> Tuple[str, str, str, Dict[str, Any]]:
        for idx in range(len(self.version ) ):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx] ) + '''.v2'''
                genres[idx] = [
                    self._normalize(genre ) + '''.v2''' for genre in genres[idx].split('''_''' )
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' )
            vocab = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'''
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab ) )}
            self.vocab['''<unk>'''] = 0
            self.n_vocab = len(vocab ) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ''''''
        else:
            self.out_of_vocab = regex.compile(r'''[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+''' )
        lyrics = self._run_strip_accents(lyrics )
        lyrics = lyrics.replace('''\\''' , '''\n''' )
        lyrics = self.out_of_vocab.sub('''''' , lyrics ), [], []
        return artists, genres, lyrics
    def _run_strip_accents( self , text ):
        text = unicodedata.normalize('''NFD''' , text )
        output = []
        for char in text:
            cat = unicodedata.category(char )
            if cat == "Mn":
                continue
            output.append(char )
        return "".join(output )
    def _normalize( self , text : str ) -> str:
        accepted = (
            [chr(i ) for i in range(ord('''a''' ) , ord('''z''' ) + 1 )]
            + [chr(i ) for i in range(ord('''A''' ) , ord('''Z''' ) + 1 )]
            + [chr(i ) for i in range(ord('''0''' ) , ord('''9''' ) + 1 )]
            + ['''.''']
        )
        accepted = frozenset(accepted )
        pattern = re.compile(r'''_+''' )
        text = ''''''.join([c if c in accepted else '''_''' for c in text.lower()] )
        text = pattern.sub('''_''' , text ).strip('''_''' )
        return text
    def convert_lyric_tokens_to_string( self , lyrics : List[str] ) -> str:
        return " ".join(lyrics )
    def convert_to_tensors( self , inputs , tensor_type : Optional[Union[str, TensorType]] = None , prepend_batch_axis : bool = False ):
        # Convert to TensorType
        if not isinstance(tensor_type , TensorType ):
            tensor_type = TensorType(tensor_type )
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    '''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''' )
            import tensorflow as tf
            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''' )
            import torch
            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''' )
            import jax.numpy as jnp  # noqa: F811
            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs ):
                inputs = as_tensor(inputs )
        except:  # noqa E722
            raise ValueError(
                '''Unable to create tensor, you should probably activate truncation and/or padding '''
                '''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''' )
        return inputs
    def __call__( self , artist , genres , lyrics="" , return_tensors="pt" ) -> BatchEncoding:
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version )
        genres = [genres] * len(self.version )
        artists_tokens , genres_tokens , lyrics_tokens = self.tokenize(artist , genres , lyrics )
        artists_id , genres_ids , full_tokens = self._convert_token_to_id(artists_tokens , genres_tokens , lyrics_tokens )
        attention_masks = [-INFINITY] * len(full_tokens[-1] )
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=return_tensors )
            for i in range(len(self.version ) )
        ]
        return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks} )
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        artists_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''] )
        with open(artists_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.artists_encoder , ensure_ascii=False ) )
        genres_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''genres_file'''] )
        with open(genres_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.genres_encoder , ensure_ascii=False ) )
        lyrics_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''] )
        with open(lyrics_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.lyrics_encoder , ensure_ascii=False ) )
        return (artists_file, genres_file, lyrics_file)
    def _convert_id_to_token( self , artists_index , genres_index , lyric_index ):
        artist = self.artists_decoder.get(artists_index )
        genres = [self.genres_decoder.get(genre ) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character ) for character in lyric_index]
        return artist, genres, lyrics
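# Hypothetical usage sketch (the three vocab paths are assumptions; __call__
# returns one `input_ids` tensor per entry in `self.version`):
#
#   tokenizer = JukeboxTokenizer("artists.json", "genres.json", "lyrics.json")
#   encoding = tokenizer("Alan Jackson", "Country Rock", "old town road")
#   encoding["input_ids"]  # list with one tensor per prior/version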
| 654 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig ( PretrainedConfig ):
    model_type = '''bert-generation'''
    def __init__( self , vocab_size=5_0_3_5_8 , hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , intermediate_size=4_0_9_6 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=0 , bos_token_id=2 , eos_token_id=1 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
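# Minimal construction sketch (the keyword values below just restate the
# defaults from the signature above):
#
#   config = BertGenerationConfig(vocab_size=50_358, hidden_size=1_024)
#   config.num_hidden_layers  # -> 24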
| 654 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_swinv2 import (
        SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        Swinv2ForImageClassification,
        Swinv2ForMaskedImageModeling,
        Swinv2Model,
        Swinv2PreTrainedModel,
    )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 654 |
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458
# Symbols
ct , x , y , z = symbols("ct x y z")
def beta ( velocity : float ):
    """simple docstring"""
    if velocity > c:
        raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError('''Speed must be greater than or equal to 1!''' )
    return velocity / c
def gamma ( velocity : float ):
    """simple docstring"""
    return 1 / sqrt(1 - beta(velocity ) ** 2 )
def transformation_matrix ( velocity : float ):
    """simple docstring"""
    return np.array(
        [
            [gamma(velocity ), -gamma(velocity ) * beta(velocity ), 0, 0],
            [-gamma(velocity ) * beta(velocity ), gamma(velocity ), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ] )
def transform ( velocity : float , event : np.ndarray | None = None ):
    """simple docstring"""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z] )  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29_979_245)
print("Example of four vector: ")
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
| 654 | 1 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 654 |
'''simple docstring'''
import heapq
import sys
import numpy as np
__lowerCAmelCase : Any = tuple[int, int]
class A :
def __init__( self : Optional[int] ) -> int:
__UpperCAmelCase = []
__UpperCAmelCase = set()
def snake_case__ ( self : Optional[Any] ) -> List[Any]:
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
def snake_case__ ( self : Dict ) -> Optional[int]:
return len(self.elements ) == 0
def snake_case__ ( self : Optional[int] , __a : Optional[Any] , __a : Dict ) -> Optional[Any]:
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(__a )
else:
# update
# print("update", item)
__UpperCAmelCase = []
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def snake_case__ ( self : int , __a : Any ) -> int:
if item in self.set:
self.set.remove(__a )
__UpperCAmelCase = []
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def snake_case__ ( self : List[str] ) -> Dict:
return self.elements[0][1]
def snake_case__ ( self : Any ) -> List[str]:
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
self.set.remove(__a )
return (priority, item)
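# Usage sketch (added; the method names below follow the call sites further
# down, which use put/minkey/top_show/remove_element):
#   q = PriorityQueue()
#   q.put((0, 0), 5.0)   # insert an item, or update its priority if present
#   q.minkey()           # -> 5.0, the smallest priority without popping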
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# euclidean distance
__UpperCAmelCase = np.array(UpperCamelCase__ )
__UpperCAmelCase = np.array(UpperCamelCase__ )
return np.linalg.norm(a - b )
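# e.g. (added) the Euclidean distance from (0, 0) to (3, 4) is 5.0
# (a 3-4-5 right triangle).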
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# integer division by time variable
return consistent_heuristic(UpperCamelCase__ , UpperCamelCase__ ) // t
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
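# e.g. (added) the Manhattan distance from (0, 0) to (3, 4) is 3 + 4 = 7.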
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : int , UpperCamelCase__ : TPos , UpperCamelCase__ : dict[TPos, float] ):
"""simple docstring"""
__UpperCAmelCase = g_function[start] + Wa * heuristics[i](UpperCamelCase__ , UpperCamelCase__ )
return ans
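# This is the weighted-A* priority f(s) = g(s) + Wa * h_i(s, goal): with
# Wa > 1 the search expands fewer states at the cost of a solution that can
# be up to Wa times longer than optimal (for a consistent heuristic).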
def lowerCAmelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase = np.chararray((n, n) )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
__UpperCAmelCase = '''*'''
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
if (j, (n - 1) - i) in blocks:
__UpperCAmelCase = '''#'''
__UpperCAmelCase = '''-'''
__UpperCAmelCase = back_pointer[goal]
while x != start:
((__UpperCAmelCase) , (__UpperCAmelCase)) = x
# print(x)
__UpperCAmelCase = '''-'''
__UpperCAmelCase = back_pointer[x]
__UpperCAmelCase = '''-'''
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
__UpperCAmelCase = back_pointer[goal]
while x != start:
print(UpperCamelCase__ , end=''' ''' )
__UpperCAmelCase = back_pointer[x]
print(UpperCamelCase__ )
sys.exit()
def lowerCAmelCase ( UpperCamelCase__ : TPos ):
"""simple docstring"""
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , ):
"""simple docstring"""
for itera in range(UpperCamelCase__ ):
open_list[itera].remove_element(UpperCamelCase__ )
# print("s", s)
# print("j", j)
((__UpperCAmelCase) , (__UpperCAmelCase)) = s
__UpperCAmelCase = (x - 1, y)
__UpperCAmelCase = (x + 1, y)
__UpperCAmelCase = (x, y + 1)
__UpperCAmelCase = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(UpperCamelCase__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(UpperCamelCase__ )
__UpperCAmelCase = -1
__UpperCAmelCase = float('''inf''' )
if valid(UpperCamelCase__ ) and g_function[neighbours] > g_function[s] + 1:
__UpperCAmelCase = g_function[s] + 1
__UpperCAmelCase = s
if neighbours not in close_list_anchor:
open_list[0].put(UpperCamelCase__ , key(UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ) )
if neighbours not in close_list_inad:
for var in range(1 , UpperCamelCase__ ):
if key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) <= Wa * key(
UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ):
open_list[j].put(
UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
__lowerCAmelCase : Optional[Any] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
__lowerCAmelCase : List[Any] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
__lowerCAmelCase : Dict = make_common_ground()
__lowerCAmelCase : int = blocks_blk
# hyper parameters
__lowerCAmelCase : Dict = 1
__lowerCAmelCase : List[str] = 1
__lowerCAmelCase : Union[str, Any] = 20
__lowerCAmelCase : Any = 3 # one consistent and two other inconsistent
# start and end destination
__lowerCAmelCase : Optional[Any] = (0, 0)
__lowerCAmelCase : Any = (n - 1, n - 1)
__lowerCAmelCase : Optional[int] = 1
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = {start: 0, goal: float('''inf''' )}
__UpperCAmelCase = {start: -1, goal: -1}
__UpperCAmelCase = []
__UpperCAmelCase = set()
for i in range(UpperCamelCase__ ):
open_list.append(PriorityQueue() )
open_list[i].put(UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
__UpperCAmelCase = []
__UpperCAmelCase = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , UpperCamelCase__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
__UpperCAmelCase , __UpperCAmelCase = open_list[i].top_show()
visited.add(UpperCamelCase__ )
expand_state(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
close_list_inad.append(UpperCamelCase__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
__UpperCAmelCase = open_list[0].top_show()
visited.add(UpperCamelCase__ )
expand_state(
UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
close_list_anchor.append(UpperCamelCase__ )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(UpperCamelCase__ ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 654 | 1 |
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
__lowerCAmelCase : str = 5
__lowerCAmelCase : Union[str, Any] = 10
@require_sentencepiece
@require_tokenizers
class A ( UpperCAmelCase , unittest.TestCase ):
a_ = SpeechaTextTokenizer
a_ = False
a_ = True
def snake_case__ ( self : Tuple ) -> List[Any]:
super().setUp()
__UpperCAmelCase = sp.SentencePieceProcessor()
spm_model.Load(__a )
__UpperCAmelCase = ['''<s>''', '''<pad>''', '''</s>''', '''<unk>''']
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(__a ) )]
__UpperCAmelCase = dict(zip(__a , range(len(__a ) ) ) )
__UpperCAmelCase = Path(self.tmpdirname )
save_json(__a , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__a , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
__UpperCAmelCase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self : List[str] ) -> Optional[int]:
__UpperCAmelCase = '''<pad>'''
__UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def snake_case__ ( self : List[Any] ) -> Dict:
__UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(__a ) , 1_0_0_1 )
def snake_case__ ( self : List[Any] ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_1 )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
__UpperCAmelCase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
__UpperCAmelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [2_8_9, 5_0, 1_4, 1_7_4, 3_8_6] , )
__UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__a , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
__UpperCAmelCase = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , [1_2, 2_5, 8_8, 5_9, 2_8, 2_3, 1_1, 4, 6_0_6, 3_5_1, 3_5_1, 3_5_1, 7, 1_6, 7_0, 5_0, 7_6, 8_4, 1_0, 4, 8] )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def snake_case__ ( self : Tuple ) -> Optional[int]:
# fmt: off
__UpperCAmelCase = {'''input_ids''': [[3_7_9_1, 7_9_7, 3_1, 1_1, 6_4, 7_9_7, 3_1, 2_4_2_9, 4_3_3, 1_2, 1_1_7_6, 1_2, 2_0, 7_8_6, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 3_2_3_8, 7_9_7, 3_1, 1_1, 3_5, 9_3, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_7, 6_1_0, 4_0, 6_2, 4_5_5, 6_5_7, 1_0_4_2, 1_2_3, 7_8_0, 1_7_7, 3_7, 3_0_9, 2_4_1, 1_2_9_8, 5_1_4, 2_0, 2_9_2, 2_7_3_7, 1_1_4, 2_4_6_9, 2_4_1, 8_5, 6_4, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 4, 5_0_9, 4_0_6, 4_2_3, 3_7, 6_0_1, 4, 7_7_7, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 2_8_4, 4, 3_3_8_8, 5_1_1, 4_5_9, 4, 3_5_5_5, 4_0, 3_2_1, 3_0_2, 7_0_5, 4, 3_3_8_8, 5_1_1, 5_8_3, 3_2_6, 5, 5, 5, 6_2, 3_3_1_0, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 3_2, 3_1, 8_5_3, 4_1_8, 6_4, 5_8_3, 5_1_1, 1_6_0_5, 6_2, 3_5, 9_3, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 1_5_2_1, 6_4, 5_8_3, 5_1_1, 5_1_9, 6_2, 2_0, 1_5_1_5, 7_6_4, 2_0, 1_4_9, 2_6_1, 5_6_2_5, 7_9_7_2, 2_0, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_9_2_5, 1_6_7_5, 1_1, 1_5, 8_0_2, 7_9_7_2, 5_7_6, 2_1_7, 1_5_0_8, 1_1, 3_5, 9_3, 1_2_5_3, 2_4_4_1, 1_5, 2_8_9, 6_5_2, 3_1, 4_1_6, 3_2_1, 3_8_4_2, 1_1_5, 4_0, 9_1_1, 8, 4_7_6, 6_1_9, 4, 3_8_0, 1_4_2, 4_2_3, 3_3_5, 2_4_0, 3_5, 9_3, 2_6_4, 8, 1_1, 3_3_5, 5_6_9, 4_2_0, 1_6_3, 5, 2], [2_6_0, 5_4_8, 5_2_8, 4_2_3, 2_0, 4_5_1, 2_0, 2_6_8_1, 1_1_5_3, 3_4_3_4, 2_0, 5_5_4_0, 3_7, 5_6_7, 1_2_6, 1_2_5_3, 2_4_4_1, 3_3_7_6, 4_4_9, 2_1_0, 4_3_1, 1_5_6_3, 1_7_7, 7_6_7, 5_5_4_0, 1_1, 1_2_0_3, 4_7_2, 1_1, 2_9_5_3, 6_8_5, 2_8_5, 3_6_4, 7_0_6, 1_1_5_3, 2_0, 6_7_9_9, 2_0, 2_8_6_9, 2_0, 4_4_6_4, 1_2_6, 4_0, 2_4_2_9, 2_0, 1_0_4_0, 8_6_6, 2_6_6_4, 4_1_8, 2_0, 3_1_8, 2_0, 1_7_2_6, 1_8_6, 2_0, 2_6_5, 5_2_2, 3_5, 9_3, 2_1_9_1, 4_6_3_4, 2_0, 1_0_4_0, 1_2, 6_7_9_9, 1_5, 2_2_8, 2_3_5_6, 1_4_2, 3_1, 1_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_7_5, 2_6_6_6, 6_8_4, 1_5_8_2, 1_1_7_6, 1_2, 6_2_7, 1_4_9, 6_1_9, 2_0, 4_9_0_2, 5_6_3, 1_1, 2_0, 1_4_9, 2_6_1, 3_4_2_0, 2_3_5_6, 1_7_4, 1_4_2, 4_7_1_4, 1_3_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name='''facebook/s2t-small-mustc-en-de-st''' , revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''' , )
@require_sentencepiece
class A ( unittest.TestCase ):
a_ = '''valhalla/s2t_mustc_multilinguial_medium'''
a_ = '''C\'est trop cool'''
a_ = '''Esto es genial'''
@classmethod
def snake_case__ ( cls : Any ) -> List[Any]:
__UpperCAmelCase = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def snake_case__ ( self : List[Any] ) -> List[str]:
self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id['''it'''] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id['''de'''] , 1_1 )
def snake_case__ ( self : Tuple ) -> Tuple:
self.assertEqual(self.tokenizer.vocab_size , 1_0_0_0_0 )
def snake_case__ ( self : int ) -> Dict:
self.assertIn(__a , self.tokenizer.all_special_ids )
__UpperCAmelCase = [ES_CODE, 4, 1_6_0_1, 4_7, 7_6_4_7, 2]
__UpperCAmelCase = self.tokenizer.decode(__a , skip_special_tokens=__a )
__UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a )
self.assertEqual(__a , __a )
self.assertNotIn(self.tokenizer.eos_token , __a )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
__UpperCAmelCase = '''fr'''
__UpperCAmelCase = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , __a )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def snake_case__ ( self : Any ) -> Tuple:
__UpperCAmelCase = '''fr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
__UpperCAmelCase = '''es'''
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 654 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
__lowerCAmelCase : List[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__lowerCAmelCase : str = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__lowerCAmelCase : int = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
__lowerCAmelCase : List[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
__lowerCAmelCase : List[str] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
__lowerCAmelCase : Optional[int] = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , UpperCamelCase__ )
return [m.group(0 ) for m in matches]
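# Illustrative split (added): "TFBertModel" -> ["TF", "Bert", "Model"]; the
# lookup below joins all but the last word, walking TFBertModel -> TFBert ->
# TF until a known model prefix is found.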
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__UpperCAmelCase = {
config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(UpperCamelCase__ ):
__UpperCAmelCase = None
if _re_tf_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = tf_models
__UpperCAmelCase = _re_tf_models.match(UpperCamelCase__ ).groups()[0]
elif _re_flax_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = flax_models
__UpperCAmelCase = _re_flax_models.match(UpperCamelCase__ ).groups()[0]
elif _re_pt_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = pt_models
__UpperCAmelCase = _re_pt_models.match(UpperCamelCase__ ).groups()[0]
if lookup_dict is not None:
while len(UpperCamelCase__ ) > 0:
if attr_name in model_prefix_to_model_type:
__UpperCAmelCase = True
break
# Try again after removing the last word in the name
__UpperCAmelCase = ''''''.join(camel_case_split(UpperCamelCase__ )[:-1] )
__UpperCAmelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
__UpperCAmelCase = list(UpperCamelCase__ )
all_models.sort()
__UpperCAmelCase = {'''model_type''': all_models}
__UpperCAmelCase = [pt_models[t] for t in all_models]
__UpperCAmelCase = [tf_models[t] for t in all_models]
__UpperCAmelCase = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
__UpperCAmelCase = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
__UpperCAmelCase = '''AutoProcessor'''
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
__UpperCAmelCase = '''AutoTokenizer'''
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
__UpperCAmelCase = '''AutoFeatureExtractor'''
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
__UpperCAmelCase = '''AutoTokenizer'''
__UpperCAmelCase = [processors[t] for t in all_models]
return pd.DataFrame(UpperCamelCase__ )
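# The resulting frame has one row per model type plus per-framework support
# flags and the default preprocessor class, e.g. (illustrative):
#   model_type | pytorch | tensorflow | flax | processor
#   bert       | True    | True       | True | AutoTokenizer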
def lowerCAmelCase ( UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
__UpperCAmelCase = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
__UpperCAmelCase = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
# The type of pipeline may not exist in this framework
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
continue
# First extract all model_names
__UpperCAmelCase = []
for name in getattr(UpperCamelCase__ , UpperCamelCase__ ).values():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
model_names.append(UpperCamelCase__ )
else:
model_names.extend(list(UpperCamelCase__ ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ):
"""simple docstring"""
__UpperCAmelCase = get_frameworks_table()
__UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ )
__UpperCAmelCase = hf_hub_download(
'''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=UpperCamelCase__ )
__UpperCAmelCase = Dataset.from_json(UpperCamelCase__ )
__UpperCAmelCase = {
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(UpperCamelCase__ ) )
}
__UpperCAmelCase = update_pipeline_and_auto_class_table(UpperCamelCase__ )
    # Sort the model classes so the output ordering is deterministic and does not create spurious update commits.
__UpperCAmelCase = sorted(table.keys() )
__UpperCAmelCase = pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
__UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(UpperCamelCase__ , '''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(UpperCamelCase__ , '''pipeline_tags.json''' ) )
if commit_sha is not None:
__UpperCAmelCase = (
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
__UpperCAmelCase = '''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' , folder_path=UpperCamelCase__ , repo_type='''dataset''' , token=UpperCamelCase__ , commit_message=UpperCamelCase__ , )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
__UpperCAmelCase = transformers_module.pipelines.SUPPORTED_TASKS
__UpperCAmelCase = []
for key in pipeline_tasks:
if key not in in_table:
__UpperCAmelCase = pipeline_tasks[key]['''pt''']
if isinstance(UpperCamelCase__ , (list, tuple) ):
__UpperCAmelCase = model[0]
__UpperCAmelCase = model.__name__
if model not in in_table.values():
missing.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
__UpperCAmelCase = ''', '''.join(UpperCamelCase__ )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
__lowerCAmelCase : Tuple = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 654 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
__lowerCAmelCase : Tuple = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class A ( UpperCAmelCase ):
a_ = '''albert'''
def __init__( self : Union[str, Any] , __a : str=3_0_0_0_0 , __a : List[str]=1_2_8 , __a : List[Any]=4_0_9_6 , __a : str=1_2 , __a : List[str]=1 , __a : Tuple=6_4 , __a : Optional[int]=1_6_3_8_4 , __a : List[Any]=1 , __a : Any="gelu_new" , __a : Any=0 , __a : Dict=0 , __a : Any=5_1_2 , __a : int=2 , __a : List[Any]=0.0_2 , __a : Union[str, Any]=1e-12 , __a : List[str]=0.1 , __a : Union[str, Any]="absolute" , __a : Optional[Any]=0 , __a : Dict=2 , __a : List[str]=3 , **__a : Tuple , ) -> int:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = embedding_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_hidden_groups
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = inner_group_num
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = classifier_dropout_prob
__UpperCAmelCase = position_embedding_type
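# Note (added): the defaults above encode ALBERT's two signature tricks: a
# small embedding_size (128) factorized away from the large hidden_size
# (4096), and num_hidden_groups controlling cross-layer parameter sharing.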
class A ( UpperCAmelCase ):
@property
def snake_case__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 654 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__lowerCAmelCase : Optional[int] = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class A ( unittest.TestCase ):
def snake_case__ ( self : Any , __a : str , __a : bool , __a : str = None , __a : list = None ) -> Tuple:
__UpperCAmelCase = None
__UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
__UpperCAmelCase = os.path.abspath('''examples''' )
for item in os.listdir(__a ):
if item not in EXCLUDE_EXAMPLES:
__UpperCAmelCase = os.path.join(__a , __a )
if os.path.isfile(__a ) and ".py" in item_path:
with self.subTest(
tested_script=__a , feature_script=__a , tested_section='''main()''' if parser_only else '''training_function()''' , ):
__UpperCAmelCase = compare_against_test(
os.path.join(__a , __a ) , __a , __a , __a )
__UpperCAmelCase = '''\n'''.join(__a )
if special_strings is not None:
for string in special_strings:
__UpperCAmelCase = diff.replace(__a , '''''' )
self.assertEqual(__a , '''''' )
def snake_case__ ( self : Optional[Any] ) -> str:
self.one_complete_example('''complete_nlp_example.py''' , __a )
self.one_complete_example('''complete_nlp_example.py''' , __a )
def snake_case__ ( self : List[str] ) -> Tuple:
__UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
__UpperCAmelCase = [
''' ''' * 1_6 + '''{\n\n''',
''' ''' * 2_0 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 2_0 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 2_0 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 2_0 + '''"epoch": epoch,\n\n''',
''' ''' * 1_6 + '''},\n\n''',
''' ''' * 1_6 + '''step=epoch,\n''',
''' ''' * 1_2,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a )
self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class A ( UpperCAmelCase ):
a_ = False
@classmethod
def snake_case__ ( cls : Tuple ) -> str:
super().setUpClass()
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
__UpperCAmelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case__ ( cls : Dict ) -> int:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def snake_case__ ( self : Tuple ) -> Dict:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def snake_case__ ( self : str ) -> int:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def snake_case__ ( self : Any ) -> Any:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
self.assertNotIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
def snake_case__ ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
if torch.cuda.is_available():
__UpperCAmelCase = torch.cuda.device_count()
else:
__UpperCAmelCase = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
else:
self.assertIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
@slow
def snake_case__ ( self : Any ) -> Optional[Any]:
__UpperCAmelCase = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
__UpperCAmelCase = re.findall('''({.+})''' , __a )
__UpperCAmelCase = [r for r in results if '''accuracy''' in r][-1]
__UpperCAmelCase = ast.literal_eval(__a )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def snake_case__ ( self : Dict ) -> int:
__UpperCAmelCase = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
__UpperCAmelCase = f"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__a , '''tracking''' ) ) )
def snake_case__ ( self : Optional[int] ) -> List[Any]:
__UpperCAmelCase = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
__UpperCAmelCase = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 654 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
__lowerCAmelCase : str = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : tuple , UpperCamelCase__ : Path , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any]=False , ):
"""simple docstring"""
output_path.parent.mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
UpperCamelCase__ , UpperCamelCase__ , f=output_path.as_posix() , input_names=UpperCamelCase__ , output_names=UpperCamelCase__ , dynamic_axes=UpperCamelCase__ , do_constant_folding=UpperCamelCase__ , use_external_data_format=UpperCamelCase__ , enable_onnx_checker=UpperCamelCase__ , opset_version=UpperCamelCase__ , )
else:
export(
UpperCamelCase__ , UpperCamelCase__ , f=output_path.as_posix() , input_names=UpperCamelCase__ , output_names=UpperCamelCase__ , dynamic_axes=UpperCamelCase__ , do_constant_folding=UpperCamelCase__ , opset_version=UpperCamelCase__ , )
@torch.no_grad()
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : bool = False ):
"""simple docstring"""
__UpperCAmelCase = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
__UpperCAmelCase = '''cuda'''
elif fpaa and not torch.cuda.is_available():
raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' )
else:
__UpperCAmelCase = '''cpu'''
__UpperCAmelCase = Path(UpperCamelCase__ )
# VAE DECODER
__UpperCAmelCase = AutoencoderKL.from_pretrained(model_path + '''/vae''' )
__UpperCAmelCase = vae_decoder.config.latent_channels
# forward only through the decoder part
__UpperCAmelCase = vae_decoder.decode
onnx_export(
UpperCamelCase__ , model_args=(
torch.randn(1 , UpperCamelCase__ , 2_5 , 2_5 ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
False,
) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
'''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=UpperCamelCase__ , )
del vae_decoder
if __name__ == "__main__":
__lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
__lowerCAmelCase : Optional[int] = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print("SD: Done: ONNX")
| 654 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__lowerCAmelCase : Any = ""
__lowerCAmelCase : int = ""
__lowerCAmelCase : Union[str, Any] = ""
__lowerCAmelCase : Any = 1 # (0 is vertical, 1 is horizontal)
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase = get_dataset(UpperCamelCase__ , UpperCamelCase__ )
print('''Processing...''' )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = update_image_and_anno(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for index, image in enumerate(UpperCamelCase__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__UpperCAmelCase = random_chars(3_2 )
__UpperCAmelCase = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
__UpperCAmelCase = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(f"""/{file_root}.jpg""" , UpperCamelCase__ , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
print(f"""Success {index+1}/{len(UpperCamelCase__ )} with {file_name}""" )
__UpperCAmelCase = []
for anno in new_annos[index]:
__UpperCAmelCase = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(UpperCamelCase__ )
with open(f"""/{file_root}.txt""" , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase = []
__UpperCAmelCase = []
for label_file in glob.glob(os.path.join(UpperCamelCase__ , '''*.txt''' ) ):
__UpperCAmelCase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(UpperCamelCase__ ) as in_file:
__UpperCAmelCase = in_file.readlines()
__UpperCAmelCase = os.path.join(UpperCamelCase__ , f"""{label_name}.jpg""" )
__UpperCAmelCase = []
for obj_list in obj_lists:
__UpperCAmelCase = obj_list.rstrip('''\n''' ).split(''' ''' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(UpperCamelCase__ )
labels.append(UpperCamelCase__ )
return img_paths, labels
def lowerCAmelCase ( UpperCamelCase__ : list , UpperCamelCase__ : list , UpperCamelCase__ : int = 1 ):
"""simple docstring"""
__UpperCAmelCase = []
__UpperCAmelCase = []
__UpperCAmelCase = []
for idx in range(len(UpperCamelCase__ ) ):
__UpperCAmelCase = []
__UpperCAmelCase = img_list[idx]
path_list.append(UpperCamelCase__ )
__UpperCAmelCase = anno_list[idx]
__UpperCAmelCase = cva.imread(UpperCamelCase__ )
if flip_type == 1:
__UpperCAmelCase = cva.flip(UpperCamelCase__ , UpperCamelCase__ )
for bbox in img_annos:
__UpperCAmelCase = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__UpperCAmelCase = cva.flip(UpperCamelCase__ , UpperCamelCase__ )
for bbox in img_annos:
__UpperCAmelCase = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(UpperCamelCase__ )
new_imgs_list.append(UpperCamelCase__ )
return new_imgs_list, new_annos_lists, path_list
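# Note (added): the annotations are assumed to be YOLO-style
# (class, x_center, y_center, width, height) normalized to [0, 1], so a
# horizontal flip only remaps x_center -> 1 - x_center and a vertical flip
# remaps y_center -> 1 - y_center; widths and heights are unchanged.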
def lowerCAmelCase ( UpperCamelCase__ : int = 3_2 ):
"""simple docstring"""
assert number_char > 1, "The number of character should greater than 1"
__UpperCAmelCase = ascii_lowercase + digits
return "".join(random.choice(UpperCamelCase__ ) for _ in range(UpperCamelCase__ ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 654 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[str] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 654 |
'''simple docstring'''
from pathlib import Path
import fire
def minify ( src_dir : str , dest_dir : str , n : int ):
    """simple docstring"""
    src_dir = Path(src_dir )
    dest_dir = Path(dest_dir )
    dest_dir.mkdir(exist_ok=True )
    for path in src_dir.iterdir():
        # Keep only the first n (right-stripped) lines of each file.
        new_lines = [x.rstrip() for x in list(path.open().readlines() )][:n]
        dest_path = dest_dir.joinpath(path.name )
        print(dest_path )
        dest_path.open('''w''' ).write('''\n'''.join(new_lines ) )
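# Example invocation via python-fire (illustrative; assumes the script is
# saved as minify.py):
#   python minify.py --src_dir data/raw --dest_dir data/small --n 100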
if __name__ == "__main__":
fire.Fire(minify)
| 654 | 1 |
'''simple docstring'''
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
__lowerCAmelCase : List[Any] = HfArgumentParser(InitializationArguments)
__lowerCAmelCase : int = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
__lowerCAmelCase : Any = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
__lowerCAmelCase : int = {
"vocab_size": len(tokenizer),
"scale_attn_by_inverse_layer_idx": True,
"reorder_and_upcast_attn": True,
}
# Load model config (GPT-2 large in this case)
__lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
__lowerCAmelCase : Dict = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 654 |
'''simple docstring'''
def lowerCAmelCase ( number : int ):
    """simple docstring"""
    if not isinstance(number , int ):
        __UpperCAmelCase = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(__UpperCAmelCase )
    if number < 1:
        __UpperCAmelCase = f"""Input value of [number={number}] must be > 0"""
        raise ValueError(__UpperCAmelCase )
    current_number = 1
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
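# Illustrative values (added): successive outputs are the Catalan numbers
# 1, 1, 2, 5, 14, ..., computed via C_i = C_{i-1} * (4*i - 2) // (i + 1);
# an input of 5 yields 14.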
if __name__ == "__main__":
import doctest
doctest.testmod()
| 654 | 1 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class A ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
a_ = IFInpaintingSuperResolutionPipeline
a_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
a_ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def snake_case__ ( self : Tuple ) -> List[str]:
return self._get_superresolution_dummy_components()
def snake_case__ ( self : Optional[int] , __a : List[str] , __a : List[str]=0 ) -> Any:
if str(__a ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(__a )
else:
__UpperCAmelCase = torch.Generator(device=__a ).manual_seed(__a )
__UpperCAmelCase = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(__a ) ).to(__a )
__UpperCAmelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__a ) ).to(__a )
__UpperCAmelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__a ) ).to(__a )
__UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def snake_case__ ( self : Union[str, Any] ) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def snake_case__ ( self : int ) -> List[str]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def snake_case__ ( self : str ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def snake_case__ ( self : List[str] ) -> Union[str, Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def snake_case__ ( self : Tuple ) -> str:
self._test_save_load_local()
def snake_case__ ( self : Any ) -> Tuple:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 654 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def lowerCAmelCase ( ):
"""simple docstring"""
raise RuntimeError('''CUDA out of memory.''' )
class A ( nn.Module ):
def __init__( self : Optional[Any] ) -> int:
super().__init__()
__UpperCAmelCase = nn.Linear(3 , 4 )
__UpperCAmelCase = nn.BatchNormad(4 )
__UpperCAmelCase = nn.Linear(4 , 5 )
def snake_case__ ( self : List[str] , __a : Optional[int] ) -> Optional[int]:
return self.lineara(self.batchnorm(self.lineara(__a ) ) )
class A ( unittest.TestCase ):
def snake_case__ ( self : Optional[int] ) -> Any:
__UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : Union[str, Any] ):
nonlocal batch_sizes
batch_sizes.append(__a )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(__a , [1_2_8, 6_4, 3_2, 1_6, 8] )
def snake_case__ ( self : str ) -> int:
__UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : str , __a : Optional[int] ):
nonlocal batch_sizes
batch_sizes.append(__a )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
__UpperCAmelCase , __UpperCAmelCase = mock_training_loop_function('''hello''' )
self.assertListEqual(__a , [1_2_8, 6_4, 3_2, 1_6, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def snake_case__ ( self : Any ) -> int:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(__a : Optional[int] ):
pass
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def snake_case__ ( self : Any ) -> List[Any]:
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(__a : Dict ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def snake_case__ ( self : List[Any] ) -> List[str]:
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : str , __a : Union[str, Any] , __a : int ):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(__a ) as cm:
mock_training_loop_function(1_2_8 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(__a : Tuple ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def snake_case__ ( self : Any ) -> List[Any]:
__UpperCAmelCase = torch.cuda.memory_allocated()
__UpperCAmelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , __a )
__UpperCAmelCase = release_memory(__a )
self.assertEqual(torch.cuda.memory_allocated() , __a )
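# Note (added): find_executable_batch_size re-runs the wrapped function after
# every simulated CUDA OOM, halving batch_size each time, which is what the
# [128, 64, 32, 16, 8] sequences asserted above encode.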
| 654 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( UpperCAmelCase , unittest.TestCase ):
a_ = OpenAIGPTTokenizer
a_ = OpenAIGPTTokenizerFast
a_ = True
a_ = False
    def setUp( self : Dict ) -> Any:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_input_output_texts( self : Optional[Any] , __a : Optional[Any] ) -> List[Any]:
        return "lower newer", "lower newer"
    def test_full_tokenizer( self : Any ) -> Optional[int]:
        tokenizer = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
        text = '''lower'''
        bpe_tokens = ['''low''', '''er</w>''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ['''<unk>''']
        input_bpe_tokens = [1_4, 1_5, 2_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_padding( self : Tuple , max_length : int=1_5 ) -> str:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = '''This is a simple input'''
                sa = ['''This is a simple input 1''', '''This is a simple input 2''']
                p = ('''This is a simple input''', '''This is a pair''')
                pa = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='''max_length''' )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , sa , max_length=max_length , padding='''max_length''' , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='''max_length''' )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , pa , max_length=max_length , padding='''max_length''' , )
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
pass
@require_ftfy
@require_spacy
@require_tokenizers
class A ( UpperCAmelCase ):
pass
| 654 |
'''simple docstring'''
from __future__ import annotations
import math
def ucal ( u : float , p : int ) -> float:
    """simple docstring"""
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
def main ( ):
    """simple docstring"""
    n = int(input('''enter the numbers of values: ''' ) )
    y = [[0 for _ in range(n )] for _ in range(n )]
    print('''enter the values of parameters in a list: ''' )
    x = list(map(int , input().split() ) )
    print('''enter the values of corresponding parameters: ''' )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input('''enter the value to interpolate: ''' ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(f"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
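# --- Worked example (added for illustration) ---
# With n = 4 equally spaced points x = [0, 1, 2, 3] and y-column values
# [0, 1, 8, 27] (i.e. f(x) = x**3), interpolating at value = 2 gives
# u = (2 - 0) / (1 - 0) = 2.0 and leading forward differences 1, 6, 6, so the
# expansion evaluates to 0 + 2*1 + (2*1/2!)*6 + (2*1*0/3!)*6 = 8.0, which is
# exactly f(2), as expected for a cubic fit through four points.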
| 654 | 1 |
'''simple docstring'''
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
def load_tfa_weights_in_bert ( model : Dict , config : List[str] , tf_checkpoint_path : str ):
    """simple docstring"""
    tf_path = os.path.abspath(tf_checkpoint_path )
    logger.info(f"""Converting TensorFlow checkpoint from {tf_path}""" )
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path )
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split('''/''' )
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"""Skipping non-model layer {full_name}""" )
            continue
        if "optimizer" in full_name:
            logger.info(f"""Skipping optimization layer {full_name}""" )
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith('''layer_with_weights''' ):
                depth += 1
            else:
                break
        layer_depth.append(depth )
        # read data
        array = tf.train.load_variable(tf_path , full_name )
        names.append('''/'''.join(name ) )
        arrays.append(array )
    logger.info(f"""Read a total of {len(arrays ):,} layers""" )
    # Sanity check
    if len(set(layer_depth ) ) != 1:
        raise ValueError(f"""Found layer names with different depths (layer depth {list(set(layer_depth ) )})""" )
    layer_depth = list(set(layer_depth ) )[0]
if layer_depth != 1:
raise ValueError(
'''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
''' heads.''' )
# convert layers
logger.info('''Converting weights...''' )
    for full_name, array in zip(names , arrays ):
        name = full_name.split('''/''' )
        pointer = model
        trace = []
        for i, m_name in enumerate(name ):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith('''layer_with_weights''' ):
                layer_num = int(m_name.split('''-''' )[-1] )
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(['''embeddings''', '''LayerNorm'''] )
                    pointer = getattr(pointer , '''embeddings''' )
                    pointer = getattr(pointer , '''LayerNorm''' )
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] )
                    pointer = getattr(pointer , '''encoder''' )
                    pointer = getattr(pointer , '''layer''' )
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(['''pooler''', '''dense'''] )
                    pointer = getattr(pointer , '''pooler''' )
                    pointer = getattr(pointer , '''dense''' )
            elif m_name == "embeddings":
                trace.append('''embeddings''' )
                pointer = getattr(pointer , '''embeddings''' )
                if layer_num == 0:
                    trace.append('''word_embeddings''' )
                    pointer = getattr(pointer , '''word_embeddings''' )
                elif layer_num == 1:
                    trace.append('''position_embeddings''' )
                    pointer = getattr(pointer , '''position_embeddings''' )
                elif layer_num == 2:
                    trace.append('''token_type_embeddings''' )
                    pointer = getattr(pointer , '''token_type_embeddings''' )
                else:
                    raise ValueError(f"""Unknown embedding layer with name {full_name}""" )
                trace.append('''weight''' )
                pointer = getattr(pointer , '''weight''' )
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(['''attention''', '''self'''] )
                pointer = getattr(pointer , '''attention''' )
                pointer = getattr(pointer , '''self''' )
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(['''attention''', '''output''', '''LayerNorm'''] )
                pointer = getattr(pointer , '''attention''' )
                pointer = getattr(pointer , '''output''' )
                pointer = getattr(pointer , '''LayerNorm''' )
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(['''attention''', '''output''', '''dense'''] )
                pointer = getattr(pointer , '''attention''' )
                pointer = getattr(pointer , '''output''' )
                pointer = getattr(pointer , '''dense''' )
            elif m_name == "_output_dense":
                # output dense
                trace.extend(['''output''', '''dense'''] )
                pointer = getattr(pointer , '''output''' )
                pointer = getattr(pointer , '''dense''' )
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(['''output''', '''LayerNorm'''] )
                pointer = getattr(pointer , '''output''' )
                pointer = getattr(pointer , '''LayerNorm''' )
            elif m_name == "_key_dense":
                # attention key
                trace.append('''key''' )
                pointer = getattr(pointer , '''key''' )
            elif m_name == "_query_dense":
                # attention query
                trace.append('''query''' )
                pointer = getattr(pointer , '''query''' )
            elif m_name == "_value_dense":
                # attention value
                trace.append('''value''' )
                pointer = getattr(pointer , '''value''' )
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(['''intermediate''', '''dense'''] )
                pointer = getattr(pointer , '''intermediate''' )
                pointer = getattr(pointer , '''dense''' )
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append('''bias''' )
                pointer = getattr(pointer , '''bias''' )
            elif m_name in ["kernel", "gamma"]:
                trace.append('''weight''' )
                pointer = getattr(pointer , '''weight''' )
            else:
                logger.warning(f"""Ignored {m_name}""" )
        # for certain layers reshape is necessary
        trace = '''.'''.join(trace )
        if re.match(R'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''' , trace ) or re.match(
            R'''(\S+)\.attention\.output\.dense\.weight''' , trace ):
            array = array.reshape(pointer.data.shape )
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array )
        else:
            raise ValueError(
                f"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"""
                f""" {array.shape}""" )
        logger.info(f"""Successfully set variable {full_name} to PyTorch layer {trace}""" )
    return model
def convert_tfa_checkpoint_to_pytorch ( tf_checkpoint_path : Union[str, Any] , config_path : Dict , pytorch_dump_path : Any ):
    """simple docstring"""
    # Instantiate model
    logger.info(f"""Loading model based on config from {config_path}...""" )
    config = BertConfig.from_json_file(config_path )
    model = BertModel(config )
    # Load weights from checkpoint
    logger.info(f"""Loading weights from checkpoint {tf_checkpoint_path}...""" )
    load_tfa_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    logger.info(f"""Saving PyTorch model to {pytorch_dump_path}...""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
__lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model (must include filename).",
)
__lowerCAmelCase : List[str] = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
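# Example invocation (added for illustration; the script file name and all paths
# are placeholders to substitute, only the flags come from the parser above):
#   python convert_script.py \
#       --tf_checkpoint_path ./tf2_checkpoint/bert_model.ckpt \
#       --bert_config_file ./tf2_checkpoint/bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin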
| 654 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger(__name__)
def create_rename_keys ( encoder_config : Optional[Any] , decoder_config : Union[str, Any] ):
    """simple docstring"""
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def read_in_q_k_v ( state_dict : Optional[Any] , encoder_config : List[Any] ):
    """simple docstring"""
    for i in range(encoder_config.num_hidden_layers ):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""" )
        state_dict[f"""encoder.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"""encoder.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"""encoder.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key ( dct : Any , old : Optional[Any] , new : List[str] ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def prepare_img ( checkpoint_url : Dict ):
    """simple docstring"""
    if "handwritten" in checkpoint_url:
        url = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint ( checkpoint_url : Dict , pytorch_dump_folder_path : Union[str, Any] ):
    """simple docstring"""
    encoder_config = ViTConfig(image_size=3_8_4 , qkv_bias=False )
    decoder_config = TrOCRConfig()
    # size of the architecture
    if "base" in checkpoint_url:
        encoder_config.hidden_size = 7_6_8
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1_0_2_4
        encoder_config.intermediate_size = 4_0_9_6
        encoder_config.num_hidden_layers = 2_4
        encoder_config.num_attention_heads = 1_6
        decoder_config.encoder_hidden_size = 1_0_2_4
    else:
        raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = '''relu'''
        decoder_config.max_position_embeddings = 1_0_2_4
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False
    # load HuggingFace model
    encoder = ViTModel(encoder_config , add_pooling_layer=False )
    decoder = TrOCRForCausalLM(decoder_config )
    model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
    model.eval()
    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' , check_hash=True )['''model''']
    rename_keys = create_rename_keys(encoder_config , decoder_config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , encoder_config )
    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]
    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith('''decoder''' ) and "output_projection" not in key:
            state_dict['''decoder.model.''' + key] = val
        else:
            state_dict[key] = val
    # load state dict
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size )
    tokenizer = RobertaTokenizer.from_pretrained('''roberta-large''' )
    processor = TrOCRProcessor(image_processor , tokenizer )
    pixel_values = processor(images=prepare_img(checkpoint_url ) , return_tensors='''pt''' ).pixel_values
    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
    outputs = model(pixel_values=pixel_values , decoder_input_ids=decoder_input_ids )
    logits = outputs.logits
    expected_shape = torch.Size([1, 1, 5_0_2_6_5] )
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :1_0] , expected_slice , atol=1E-3 ), "First elements of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving processor to {pytorch_dump_folder_path}""" )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__lowerCAmelCase : Optional[int] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
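# Example invocation (added for illustration; the output folder and script file
# name are placeholders, while the URL is the default defined above):
#   python convert_script.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten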
| 654 | 1 |
'''simple docstring'''
import math
import os
import sys
def read_file_binary ( file_path : str ) -> str:
    """simple docstring"""
    result = ''''''
    try:
        with open(file_path , '''rb''' ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"""{dat:08b}"""
            result += curr_byte
        return result
except OSError:
print('''File not accessible''' )
sys.exit()
def add_key_to_lexicon ( lexicon : dict[str, str] , curr_string : str , index : int , last_match_id : str ) -> None:
    """simple docstring"""
    lexicon.pop(curr_string )
    lexicon[curr_string + '''0'''] = last_match_id
    if math.log2(index ).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = '''0''' + lexicon[curr_key]
    lexicon[curr_string + '''1'''] = bin(index )[2:]
def compress_data ( data_bits : str ) -> str:
    """simple docstring"""
    lexicon = {'''0''': '''0''', '''1''': '''1'''}
    result , curr_string = '''''', ''''''
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon , curr_string , index , last_match_id )
        index += 1
        curr_string = ''''''
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def add_file_length ( source_path : str , compressed : str ) -> str:
    """simple docstring"""
    file_length = os.path.getsize(source_path )
    file_length_binary = bin(file_length )[2:]
    length_length = len(file_length_binary )
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary ( file_path : str , to_write : str ) -> None:
    """simple docstring"""
    byte_length = 8
    try:
        with open(file_path , '''wb''' ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append('''10000000''' )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder='''big''' ) )
except OSError:
print('''File not accessible''' )
sys.exit()
def compress ( source_path : str , destination_path : str ) -> None:
    """simple docstring"""
    data_bits = read_file_binary(source_path )
    compressed = compress_data(data_bits )
    compressed = add_file_length(source_path , compressed )
    write_file_binary(destination_path , compressed )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
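# Example invocation (added for illustration; file names are placeholders):
#   python compress_script.py source.bin destination.lz
# The output prepends the header produced by add_file_length (length_length - 1
# zero bits followed by the source size in binary) to the compressed bitstream,
# which write_file_binary then pads out to whole bytes.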
| 654 |
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class A ( unittest.TestCase ):
    def get_file_format( self : List[Any] , seed : List[str] , shape : Optional[Any] ) -> List[Any]:
        return f"""gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy"""
    def tearDown( self : Dict ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
    def get_latents( self : Optional[Any] , seed : Tuple=0 , shape : List[Any]=(4, 4, 6_4, 6_4) , fpaa : Optional[Any]=False ) -> Tuple:
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return image
    def get_unet_model( self : int , fpaa : Optional[Any]=False , model_id : Optional[Any]="CompVis/stable-diffusion-v1-4" ) -> Any:
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        revision = '''bf16''' if fpaa else None
        model , params = FlaxUNet2DConditionModel.from_pretrained(
            model_id , subfolder='''unet''' , dtype=dtype , revision=revision )
        return model, params
    def get_encoder_hidden_states( self : str , seed : int=0 , shape : Tuple=(4, 7_7, 7_6_8) , fpaa : Optional[int]=False ) -> Union[str, Any]:
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[1_7, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 1_0_0_0, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16( self : Tuple , seed : Tuple , timestep : str , expected_slice : Optional[Any] ) -> Any:
        model , params = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=True )
        latents = self.get_latents(seed , fpaa=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , fpaa=True )
        sample = model.apply(
            {'''params''': params} , latents , jnp.array(timestep , dtype=jnp.int32 ) , encoder_hidden_states=encoder_hidden_states , ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.float32 )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.float32 )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[1_7, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 1_0_0_0, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16( self : Optional[Any] , seed : Optional[int] , timestep : Optional[Any] , expected_slice : Optional[Any] ) -> Union[str, Any]:
        model , params = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=True )
        latents = self.get_latents(seed , shape=(4, 4, 9_6, 9_6) , fpaa=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , shape=(4, 7_7, 1_0_2_4) , fpaa=True )
        sample = model.apply(
            {'''params''': params} , latents , jnp.array(timestep , dtype=jnp.int32 ) , encoder_hidden_states=encoder_hidden_states , ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.float32 )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.float32 )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1e-2 )
| 654 | 1 |
'''simple docstring'''
import random
def _partition ( data : list , pivot : Union[str, Any] ) -> tuple:
    """simple docstring"""
    less , equal , greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element )
        elif element > pivot:
            greater.append(element )
        else:
            equal.append(element )
    return less, equal, greater
def quick_select ( items : list , index : int ):
    """simple docstring"""
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)
    # invalid input
    if index >= len(items ) or index < 0:
        return None
    pivot = items[random.randint(0 , len(items ) - 1 )]
    count = 0
    smaller , equal , larger = _partition(items , pivot )
    count = len(equal )
    m = len(smaller )
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller , index )
    # must be in larger
    else:
        return quick_select(larger , index - (m + count) )
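# --- Usage sketch (added for illustration) ---
# quick_select returns the element that would sit at position `index` if the
# list were sorted, in expected linear time.
if __name__ == "__main__":
    items = [2, 4, 5, 7, 8_9_9, 5_4, 3_2]
    # sorted: [2, 4, 5, 7, 32, 54, 899] -> the median (index 3) is 7
    assert quick_select(items , 3 ) == 7
    print(quick_select(items , len(items ) // 2 ) )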
| 654 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__lowerCAmelCase : Optional[int] = "examples/"
__lowerCAmelCase : Dict = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
REPLACE_FILES = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__lowerCAmelCase : int = "README.md"
def update_version_in_file ( fname : str , version : str , pattern : str ):
    """simple docstring"""
    with open(fname , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        code = f.read()
    re_pattern , replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('''VERSION''' , version )
    code = re_pattern.sub(replace , code )
    with open(fname , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.write(code )
def update_version_in_examples ( version : str ):
    """simple docstring"""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('''research_projects''' )
        if "legacy" in directories:
            directories.remove('''legacy''' )
        for fname in fnames:
            if fname.endswith('''.py''' ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern='''examples''' )
def global_version_update ( version : str , patch : bool=False ):
    """simple docstring"""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list ( ):
    """simple docstring"""
    _start_prompt = '''🤗 Transformers currently provides the following architectures'''
    _end_prompt = '''1. Want to contribute a new model?'''
    with open(README_FILE , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('''1.''' ):
            lines[index] = lines[index].replace(
                '''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
        index += 1
    with open(README_FILE , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.writelines(lines )
def get_version ( ):
    """simple docstring"""
    with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['''init'''][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work ( patch : bool=False ):
    """simple docstring"""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
    else:
        default_version = f"""{default_version.major}.{default_version.minor + 1}.0"""
    # Now let's ask nicely if that's the right one.
    version = input(f"""Which version are you releasing? [{default_version}]""" )
    if len(version ) == 0:
        version = default_version
    print(f"""Updating version to {version}.""" )
    global_version_update(version , patch=patch )
    if not patch:
        print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
        clean_main_ref_in_model_list()
def post_release_work ( ):
    """simple docstring"""
    current_version = get_version()
    dev_version = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"""Which version are we developing now? [{dev_version}]""" )
    if len(version ) == 0:
        version = dev_version
    print(f"""Updating version to {version}.""" )
    global_version_update(version )
    print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
    clean_main_ref_in_model_list()
if __name__ == "__main__":
__lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__lowerCAmelCase : Tuple = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
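# --- Illustration (added; not part of the original script) ---
# How one REPLACE_PATTERNS entry rewrites a version line, shown doctest-style:
#   >>> re_pattern, template = REPLACE_PATTERNS["init"]
#   >>> re_pattern.sub(template.replace("VERSION", "4.3.0"), '__version__ = "4.2.0.dev0"\n')
#   '__version__ = "4.3.0"\n'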
| 654 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
class A ( UpperCAmelCase ):
a_ = ['''pixel_values''']
    def __init__( self : int , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PIL.Image.BICUBIC , do_center_crop : bool = True , crop_size : Dict[str, int] = None , rescale_factor : Union[int, float] = 1 / 2_5_5 , do_rescale : bool = True , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs : Optional[Any] , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'''height''': 2_5_6, '''width''': 2_5_6}
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self : Any , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PIL.Image.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Any , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
        return resize(
            image , size=(size['''height'''], size['''width''']) , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self : str , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : List[str] , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )
    def rescale( self : Union[str, Any] , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Optional[int] , ) -> Tuple:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self : Any , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Any , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self : Tuple , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : Optional[int]=None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs : List[str] , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 654 |
'''simple docstring'''
def counting_sort ( collection : list ) -> list:
    """simple docstring"""
    # if the collection is empty, returns empty
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1 , counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0 , coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string ( string : str ) -> str:
    """simple docstring"""
    return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
__lowerCAmelCase : str = input("Enter numbers separated by a comma:\n").strip()
__lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
| 654 | 1 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
__lowerCAmelCase : str = "naver-clova-ix/donut-base"
class A ( unittest.TestCase ):
    def setUp( self : Tuple ) -> List[Any]:
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME )
    def test_token2json( self : int ) -> int:
        expected_json = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
        sequence = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
        actual_json = self.processor.token2json(sequence )
        self.assertDictEqual(actual_json , expected_json )
| 654 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def lowerCAmelCase ( UpperCamelCase__ : str = "AAPL" ):
"""simple docstring"""
__UpperCAmelCase = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
__UpperCAmelCase = BeautifulSoup(requests.get(UpperCamelCase__ ).text , '''html.parser''' )
__UpperCAmelCase = '''My(6px) Pos(r) smartphone_Mt(6px)'''
return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 654 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A ( UpperCAmelCase ):
a_ = (IPNDMScheduler,)
a_ = (('''num_inference_steps''', 5_0),)
    def get_scheduler_config( self : Optional[int] , **kwargs : str ) -> Tuple:
        config = {'''num_train_timesteps''': 1_0_0_0}
        config.update(**kwargs )
        return config
    def check_over_configs( self : str , time_step : Optional[Any]=0 , **config : Any ) -> Dict:
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained( self : Dict ) -> List[str]:
        pass
    def check_over_forward( self : List[str] , time_step : Union[str, Any]=0 , **forward_kwargs : int ) -> Tuple:
        kwargs = dict(self.forward_default_kwargs )
        kwargs.update(forward_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop( self : str , **config : Any ) -> str:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 1_0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
    def test_step_shape( self : str ) -> Any:
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , '''set_timesteps''' ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , '''set_timesteps''' ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
            scheduler.ets = dummy_past_residuals[:]
            time_step_a = scheduler.timesteps[5]
            time_step_b = scheduler.timesteps[6]
            output_a = scheduler.step(residual , time_step_a , sample , **kwargs ).prev_sample
            output_b = scheduler.step(residual , time_step_b , sample , **kwargs ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_b.shape )
            output_a = scheduler.step(residual , time_step_a , sample , **kwargs ).prev_sample
            output_b = scheduler.step(residual , time_step_b , sample , **kwargs ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_b.shape )
    def test_timesteps( self : Dict ) -> Dict:
        for timesteps in [1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps , time_step=None )
    def test_inference_steps( self : Dict ) -> Optional[Any]:
        for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ):
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=None )
    def test_full_loop_no_noise( self : Union[str, Any] ) -> List[Any]:
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 2_5_4_0_5_2_9 ) < 1_0
| 654 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime ( arrival_time : list[int] , burst_time : list[int] , no_of_processes : int ) -> list[int]:
    """simple docstring"""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Initialize remaining_time to waiting_time.
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    ready_process = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes ):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i )
        if len(ready_process ) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime ( burst_time : list[int] , no_of_processes : int , waiting_time : list[int] ) -> list[int]:
    """simple docstring"""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print("[TEST CASE 01]")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 654 | 1 |
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class A ( unittest.TestCase ):
    def test_get_aligned_output_features_output_indices( self : List[str] ) -> List[str]:
        stage_names = ['''a''', '''b''', '''c''']
        # Defaults to last layer if both are None
        out_features , out_indices = get_aligned_output_features_output_indices(None , None , stage_names )
        self.assertEqual(out_features , ['''c'''] )
        self.assertEqual(out_indices , [2] )
        # Out indices set to match out features
        out_features , out_indices = get_aligned_output_features_output_indices(['''a''', '''c'''] , None , stage_names )
        self.assertEqual(out_features , ['''a''', '''c'''] )
        self.assertEqual(out_indices , [0, 2] )
        # Out features set to match out indices
        out_features , out_indices = get_aligned_output_features_output_indices(None , [0, 2] , stage_names )
        self.assertEqual(out_features , ['''a''', '''c'''] )
        self.assertEqual(out_indices , [0, 2] )
        # Out features selected from negative indices
        out_features , out_indices = get_aligned_output_features_output_indices(None , [-3, -1] , stage_names )
        self.assertEqual(out_features , ['''a''', '''c'''] )
        self.assertEqual(out_indices , [-3, -1] )
    def test_verify_out_features_out_indices( self : Optional[Any] ) -> Optional[Any]:
        # Stage names must be set
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , None )
        # Out features must be a list
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(('''a''', '''b''') , (0, 1) , ['''a''', '''b'''] )
        # Out features must be a subset of stage names
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , ['''a'''] )
        # Out indices must be a list or tuple
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(None , 0 , ['''a''', '''b'''] )
        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(None , (0, 1) , ['''a'''] )
        # Out features and out indices must be the same length
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(['''a''', '''b'''] , (0,) , ['''a''', '''b''', '''c'''] )
        # Out features should match out indices
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(['''a''', '''b'''] , (0, 2) , ['''a''', '''b''', '''c'''] )
        # Out features and out indices should be in order
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(['''b''', '''a'''] , (0, 1) , ['''a''', '''b'''] )
        # Check passes with valid inputs
        verify_out_features_out_indices(['''a''', '''b''', '''d'''] , (0, 1, -1) , ['''a''', '''b''', '''c''', '''d'''] )
    def test_backbone_mixin( self : Union[str, Any] ) -> Dict:
        backbone = BackboneMixin()
        backbone.stage_names = ['''a''', '''b''', '''c''']
        backbone._out_features = ['''a''', '''c''']
        backbone._out_indices = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
        self.assertEqual(backbone.out_indices , [0, 2] )
        # Check out features and indices are updated correctly
        backbone.out_features = ['''a''', '''b''']
        self.assertEqual(backbone.out_features , ['''a''', '''b'''] )
        self.assertEqual(backbone.out_indices , [0, 1] )
        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
        self.assertEqual(backbone.out_indices , [-3, -1] )
| 654 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : List[str] , **__a : Union[str, Any] ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : List[str] , **__a : int ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : int , **__a : List[Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Optional[Any] , **__a : Tuple ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Tuple , **__a : Optional[Any] ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : str , **__a : Tuple ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : List[str] , **__a : int ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : List[str] , **__a : Optional[int] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : Any ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : Dict , **__a : List[str] ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[str] , **__a : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Optional[int] , **__a : Optional[int] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[str] , **__a : List[str] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Optional[int] , **__a : Dict ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Any ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[Any] , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Optional[int] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : str ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Any ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : str , **__a : Tuple ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[str] , **__a : Optional[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Optional[int] , **__a : Union[str, Any] ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Union[str, Any] , **__a : List[str] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Union[str, Any] , **__a : Any ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : int , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : int ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Tuple , **__a : str ) -> Dict:
requires_backends(cls , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[Any] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Any , **UpperCamelCase__ : List[str] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : str , **UpperCamelCase__ : str ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Dict , **UpperCamelCase__ : Dict ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Tuple , **UpperCamelCase__ : int ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Tuple ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : str , **__a : List[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : int , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : str , **__a : List[str] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : List[Any] , **__a : List[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : int ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Optional[Any] , **__a : Tuple ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : str , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : str , **__a : str ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : str ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : int , **__a : Tuple ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Tuple , **__a : str ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Dict ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : str , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : int , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : str , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : int , **__a : List[str] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Union[str, Any] , **__a : Optional[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : List[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Optional[Any] , **__a : Any ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : List[str] , **__a : Dict ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : int , **__a : Dict ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : List[str] , **__a : Union[str, Any] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : Dict ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Tuple , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : str , **__a : Any ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Dict , **__a : Optional[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Optional[Any] , **__a : List[str] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Union[str, Any] , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : Optional[int] , **__a : List[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : List[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Any , **__a : str ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : int , **__a : Optional[Any] ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Tuple , **__a : Optional[int] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : Tuple ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Union[str, Any] , **__a : List[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : List[Any] , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[Any] , *__a : Optional[int] , **__a : int ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : int , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Any , **__a : int ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Union[str, Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Dict ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[Any] , *__a : int , **__a : Optional[int] ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Tuple , **__a : List[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Dict , **__a : Optional[int] ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Any , **__a : Dict ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : Tuple , **__a : Optional[int] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Optional[Any] , **__a : Optional[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : Dict ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Union[str, Any] , **__a : Optional[int] ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Any , **__a : Optional[int] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Union[str, Any] , **__a : List[str] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : List[Any] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Dict , **__a : int ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : Union[str, Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : int ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Optional[Any] , **__a : int ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[Any] , **__a : Optional[int] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Optional[Any] , **__a : Optional[int] ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[int] , **__a : List[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : List[str] , **__a : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Tuple , **__a : Tuple ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : Tuple ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[str] , **__a : int ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Tuple , **__a : Optional[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Any , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : int , **__a : int ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : int , **__a : str ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : List[str] , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : str , **__a : Optional[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : List[Any] , **__a : List[str] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Optional[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[Any] , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : List[Any] , **__a : Optional[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[str] , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : str , **__a : Any ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Dict , **__a : Tuple ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Any , **__a : Optional[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : int , **__a : List[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Tuple ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : int , **__a : Optional[Any] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Optional[int] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[str] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : int , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : List[str] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
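# Note on the pattern above (behaviour taken from requires_backends): each class is an
# import-time placeholder. Instantiating one, or calling either classmethod, without
# PyTorch installed raises an error naming the missing '''torch''' backend, instead of
# the whole package failing at import.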
| 654 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def lowerCAmelCase ( ):
    """Entry point for the Diffusers CLI: register sub-commands, parse arguments, dispatch."""
    parser = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
    commands_parser = parser.add_subparsers(help='''diffusers-cli command helpers''' )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
    lowerCAmelCase()
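# Example invocation (assumes the package's console entry point is installed and that
# EnvironmentCommand registers itself under the '''env''' name):
#   diffusers-cli env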
| 654 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 654 | 1 |
'''simple docstring'''
__lowerCAmelCase : str = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__lowerCAmelCase : Optional[Any] = [{"type": "code", "content": INSTALL_CONTENT}]
__lowerCAmelCase : str = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 654 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Optional[Any] = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
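# Minimal sketch of what the lazy structure above enables (assuming torch, sentencepiece
# and tokenizers are installed):
#   from transformers.models.llama import LlamaConfig, LlamaTokenizerFast, LlamaForCausalLM
# _LazyModule defers the heavy submodule imports until one of these names is first accessed.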
| 654 | 1 |
'''simple docstring'''
from pathlib import Path
import fire
def lowerCAmelCase ( src_dir : str , dest_dir : str , n : int ):
    """Write the first n lines of every file in src_dir to a same-named file in dest_dir."""
    src_dir = Path(src_dir )
    dest_dir = Path(dest_dir )
    dest_dir.mkdir(exist_ok=True )
    for path in src_dir.iterdir():
        new_lines = [x.rstrip() for x in list(path.open().readlines() )][:n]
        dest_path = dest_dir.joinpath(path.name )
        print(dest_path )
        dest_path.open('''w''' ).write('''\n'''.join(new_lines ) )
if __name__ == "__main__":
    fire.Fire(lowerCAmelCase)
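# Usage sketch (script name and paths are hypothetical); python-fire maps the function
# arguments onto the command line:
#   python minify.py data/full data/mini 100
# which writes the first 100 lines of every file in data/full into data/mini.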
| 654 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , ):
"""simple docstring"""
__UpperCAmelCase = {}
if train_file is not None:
__UpperCAmelCase = [train_file]
if eval_file is not None:
__UpperCAmelCase = [eval_file]
if test_file is not None:
__UpperCAmelCase = [test_file]
__UpperCAmelCase = datasets.load_dataset('''csv''' , data_files=UpperCamelCase__ )
__UpperCAmelCase = list(ds[list(files.keys() )[0]].features.keys() )
__UpperCAmelCase = features_name.pop(UpperCamelCase__ )
__UpperCAmelCase = list(set(ds[list(files.keys() )[0]][label_name] ) )
__UpperCAmelCase = {label: i for i, label in enumerate(UpperCamelCase__ )}
__UpperCAmelCase = tokenizer.model_input_names
__UpperCAmelCase = {}
if len(UpperCamelCase__ ) == 1:
for k in files.keys():
__UpperCAmelCase = ds[k].map(
lambda UpperCamelCase__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' ) , batched=UpperCamelCase__ , )
elif len(UpperCamelCase__ ) == 2:
for k in files.keys():
__UpperCAmelCase = ds[k].map(
lambda UpperCamelCase__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' , ) , batched=UpperCamelCase__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
__UpperCAmelCase = (
tf.data.Dataset.from_generator(
UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__UpperCAmelCase = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__UpperCAmelCase = (
tf.data.Dataset.from_generator(
UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__UpperCAmelCase = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__UpperCAmelCase = (
tf.data.Dataset.from_generator(
UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__UpperCAmelCase = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
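# Input format implied by the code above: every CSV shares a header row; the column at
# index label_column_id supplies the labels, and the remaining one or two columns are the
# text inputs passed to tokenizer.batch_encode_plus.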
__lowerCAmelCase : List[Any] = logging.getLogger(__name__)
@dataclass
class A :
a_ = field(metadata={'''help''': '''Which column contains the label'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the training file'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the development file'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the test file'''} )
a_ = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class A :
a_ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def lowerCAmelCase ( ):
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCamelCase__ ) , labelaid=UpperCamelCase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__UpperCAmelCase = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , )
def compute_metrics(UpperCamelCase__ : EvalPrediction ) -> Dict:
__UpperCAmelCase = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__UpperCAmelCase = TFTrainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCAmelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__UpperCAmelCase = trainer.evaluate()
__UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(UpperCamelCase__ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(UpperCamelCase__ )
return results
if __name__ == "__main__":
main()
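# Example run (script name assumed; the flags mirror the dataclass fields and the
# attributes accessed above):
#   python run_tf_text_classification.py --model_name_or_path distilbert-base-uncased \
#       --label_column_id 0 --train_file train.csv --dev_file dev.csv \
#       --max_seq_length 128 --output_dir ./results --do_train --do_eval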
| 654 | 1 |
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : int = logging.get_logger(__name__)
__lowerCAmelCase : Any = "https://openaipublic.azureedge.net/jukebox/models/"
__lowerCAmelCase : Optional[Any] = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 1_0:
__UpperCAmelCase = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 1_0:
__UpperCAmelCase = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 1_0:
__UpperCAmelCase = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 1_0:
__UpperCAmelCase = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
__UpperCAmelCase = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
__UpperCAmelCase = key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__UpperCAmelCase = key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
__UpperCAmelCase = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = {}
import re
__UpperCAmelCase = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__UpperCAmelCase = re.compile(
R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__UpperCAmelCase = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__UpperCAmelCase = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__UpperCAmelCase = re.compile(
R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__UpperCAmelCase = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__UpperCAmelCase = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
__UpperCAmelCase = re.compile(
R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__UpperCAmelCase = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(UpperCamelCase__ ):
__UpperCAmelCase = re_encoder_block_conv_in.match(UpperCamelCase__ )
__UpperCAmelCase = regex_match.groups()
__UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] )
__UpperCAmelCase = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
__UpperCAmelCase = re_encoder_block_conv_in.sub(UpperCamelCase__ , UpperCamelCase__ )
elif re_encoder_block_resnet.fullmatch(UpperCamelCase__ ):
__UpperCAmelCase = re_encoder_block_resnet.match(UpperCamelCase__ )
__UpperCAmelCase = regex_match.groups()
__UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] )
__UpperCAmelCase = {'''1''': 1, '''3''': 2}[groups[-2]]
__UpperCAmelCase = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
__UpperCAmelCase = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
__UpperCAmelCase = prefix + resnet_block
__UpperCAmelCase = re_encoder_block_resnet.sub(UpperCamelCase__ , UpperCamelCase__ )
elif re_encoder_block_proj_out.fullmatch(UpperCamelCase__ ):
__UpperCAmelCase = re_encoder_block_proj_out.match(UpperCamelCase__ )
__UpperCAmelCase = regex_match.groups()
__UpperCAmelCase = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
__UpperCAmelCase = re_encoder_block_proj_out.sub(UpperCamelCase__ , UpperCamelCase__ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(UpperCamelCase__ ):
__UpperCAmelCase = re_decoder_block_conv_out.match(UpperCamelCase__ )
__UpperCAmelCase = regex_match.groups()
__UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
__UpperCAmelCase = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
__UpperCAmelCase = re_decoder_block_conv_out.sub(UpperCamelCase__ , UpperCamelCase__ )
elif re_decoder_block_resnet.fullmatch(UpperCamelCase__ ):
__UpperCAmelCase = re_decoder_block_resnet.match(UpperCamelCase__ )
__UpperCAmelCase = regex_match.groups()
__UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
__UpperCAmelCase = {'''1''': 1, '''3''': 2}[groups[-2]]
__UpperCAmelCase = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
__UpperCAmelCase = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
__UpperCAmelCase = prefix + resnet_block
__UpperCAmelCase = re_decoder_block_resnet.sub(UpperCamelCase__ , UpperCamelCase__ )
elif re_decoder_block_proj_in.fullmatch(UpperCamelCase__ ):
__UpperCAmelCase = re_decoder_block_proj_in.match(UpperCamelCase__ )
__UpperCAmelCase = regex_match.groups()
__UpperCAmelCase = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
__UpperCAmelCase = re_decoder_block_proj_in.sub(UpperCamelCase__ , UpperCamelCase__ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(UpperCamelCase__ ):
__UpperCAmelCase = re_prior_cond_conv_out.match(UpperCamelCase__ )
__UpperCAmelCase = regex_match.groups()
__UpperCAmelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
__UpperCAmelCase = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
__UpperCAmelCase = re_prior_cond_conv_out.sub(UpperCamelCase__ , UpperCamelCase__ )
elif re_prior_cond_resnet.fullmatch(UpperCamelCase__ ):
__UpperCAmelCase = re_prior_cond_resnet.match(UpperCamelCase__ )
__UpperCAmelCase = regex_match.groups()
__UpperCAmelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
__UpperCAmelCase = {'''1''': 1, '''3''': 2}[groups[-2]]
__UpperCAmelCase = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
__UpperCAmelCase = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
__UpperCAmelCase = prefix + resnet_block
__UpperCAmelCase = re_prior_cond_resnet.sub(UpperCamelCase__ , UpperCamelCase__ )
elif re_prior_cond_proj_in.fullmatch(UpperCamelCase__ ):
__UpperCAmelCase = re_prior_cond_proj_in.match(UpperCamelCase__ )
__UpperCAmelCase = regex_match.groups()
__UpperCAmelCase = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
__UpperCAmelCase = re_prior_cond_proj_in.sub(UpperCamelCase__ , UpperCamelCase__ )
# keep original key
else:
__UpperCAmelCase = original_key
__UpperCAmelCase = replace_key(UpperCamelCase__ )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
# handle missmatched shape
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
__UpperCAmelCase = model_state_dict[f"""{key_prefix}.{key}"""]
print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
__UpperCAmelCase = original_key
__UpperCAmelCase = original_key
__UpperCAmelCase = value
return new_dict
@torch.no_grad()
def lowerCAmelCase ( UpperCamelCase__ : Tuple=None , UpperCamelCase__ : List[Any]=None ):
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
__UpperCAmelCase = requests.get(f"""{PREFIX}{file}""" , allow_redirects=UpperCamelCase__ )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=UpperCamelCase__ )
open(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , '''wb''' ).write(r.content )
__UpperCAmelCase = MODEL_MAPPING[model_name.split('''/''' )[-1]]
__UpperCAmelCase = JukeboxConfig.from_pretrained(UpperCamelCase__ )
__UpperCAmelCase = JukeboxModel(UpperCamelCase__ )
__UpperCAmelCase = []
__UpperCAmelCase = {}
for i, dict_name in enumerate(UpperCamelCase__ ):
__UpperCAmelCase = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )['''model''']
__UpperCAmelCase = {}
for k in old_dic.keys():
if k.endswith('''.b''' ):
__UpperCAmelCase = old_dic[k]
elif k.endswith('''.w''' ):
__UpperCAmelCase = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__UpperCAmelCase = old_dic[k]
else:
__UpperCAmelCase = old_dic[k]
__UpperCAmelCase = '''vqvae''' if i == 0 else f"""priors.{3 - i}"""
__UpperCAmelCase = fix_jukebox_keys(UpperCamelCase__ , model.state_dict() , UpperCamelCase__ , UpperCamelCase__ )
weight_dict.append(UpperCamelCase__ )
__UpperCAmelCase = weight_dict.pop(0 )
model.vqvae.load_state_dict(UpperCamelCase__ )
for i in range(len(UpperCamelCase__ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , '''w''' ) as txtfile:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
return weight_dict
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
__lowerCAmelCase : Any = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
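# Example invocation (script name assumed; both flags and their defaults are defined in
# the argparse section above):
#   python convert_jukebox.py --model_name jukebox-5b-lyrics \
#       --pytorch_dump_folder_path jukebox-5b-lyrics-converted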
| 654 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class A :
def __init__( self : List[Any] , __a : Any , ) -> Dict:
__UpperCAmelCase = parent
__UpperCAmelCase = 1_3
__UpperCAmelCase = 7
__UpperCAmelCase = True
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = True
__UpperCAmelCase = 9_9
__UpperCAmelCase = 3_2
__UpperCAmelCase = 2
__UpperCAmelCase = 4
__UpperCAmelCase = 3_7
__UpperCAmelCase = '''gelu'''
__UpperCAmelCase = 0.1
__UpperCAmelCase = 0.1
__UpperCAmelCase = 5_1_2
__UpperCAmelCase = 1_6
__UpperCAmelCase = 2
__UpperCAmelCase = 0.0_2
__UpperCAmelCase = 3
__UpperCAmelCase = 4
__UpperCAmelCase = None
def snake_case__ ( self : Optional[int] ) -> Dict:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = None
if self.use_input_mask:
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self : Union[str, Any] , __a : List[str] , __a : int , __a : Union[str, Any] , __a : Union[str, Any] , __a : List[Any] , __a : int ) -> Any:
__UpperCAmelCase = TFDistilBertModel(config=__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
__UpperCAmelCase = [input_ids, input_mask]
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : Tuple , __a : List[Any] , __a : int , __a : Tuple , __a : List[Any] , __a : Union[str, Any] , __a : List[Any] ) -> int:
__UpperCAmelCase = TFDistilBertForMaskedLM(config=__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : Optional[int] , __a : Any , __a : Union[str, Any] , __a : Optional[int] , __a : int , __a : Optional[Any] , __a : Optional[int] ) -> Dict:
__UpperCAmelCase = TFDistilBertForQuestionAnswering(config=__a )
__UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self : Any , __a : Optional[Any] , __a : List[str] , __a : Dict , __a : Dict , __a : int , __a : List[Any] ) -> Dict:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = TFDistilBertForSequenceClassification(__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : Union[str, Any] , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[int] , __a : List[str] , __a : Dict ) -> str:
__UpperCAmelCase = self.num_choices
__UpperCAmelCase = TFDistilBertForMultipleChoice(__a )
__UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__ ( self : int , __a : Optional[Any] , __a : int , __a : Tuple , __a : int , __a : Optional[int] , __a : Optional[int] ) -> int:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = TFDistilBertForTokenClassification(__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : str ) -> Any:
__UpperCAmelCase = self.prepare_config_and_inputs()
((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) = config_and_inputs
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class A ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
a_ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
a_ = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ = False
a_ = False
def snake_case__ ( self : Any ) -> Any:
__UpperCAmelCase = TFDistilBertModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=__a , dim=3_7 )
def snake_case__ ( self : List[Any] ) -> Optional[int]:
self.config_tester.run_common_tests()
def snake_case__ ( self : Any ) -> str:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__a )
def snake_case__ ( self : Tuple ) -> Dict:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__a )
def snake_case__ ( self : Union[str, Any] ) -> Any:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__a )
def snake_case__ ( self : Optional[Any] ) -> Dict:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__a )
def snake_case__ ( self : Any ) -> int:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__a )
def snake_case__ ( self : List[str] ) -> List[Any]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__a )
@slow
def snake_case__ ( self : Dict ) -> Tuple:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__UpperCAmelCase = TFDistilBertModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_tf
class A ( unittest.TestCase ):
@slow
def snake_case__ ( self : int ) -> Dict:
__UpperCAmelCase = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
__UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCAmelCase = model(__a )[0]
__UpperCAmelCase = [1, 6, 7_6_8]
self.assertEqual(output.shape , __a )
__UpperCAmelCase = tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-4 )
| 654 | 1 |
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
__lowerCAmelCase : Optional[int] = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class A :
a_ = 42
a_ = None
a_ = None
a_ = None
a_ = None
    def __post_init__( self ) -> None:
        self.major , self.minor , self.patch = _str_to_version_tuple(self.version_str )
    def __repr__( self : List[Any] ) -> Optional[Any]:
        return f"""{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"""
    @property
    def tuple( self ):
        return self.major, self.minor, self.patch
    def _validate_operand ( self , other ):
        if isinstance(other , str ):
            return Version(other )
        elif isinstance(other , Version ):
            return other
        raise TypeError(f"""{other} (type {type(other )}) cannot be compared to version.""" )
def __eq__( self : Any , __a : Tuple ) -> Tuple:
try:
__UpperCAmelCase = self._validate_operand(__a )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : Optional[int] , __a : str ) -> Union[str, Any]:
__UpperCAmelCase = self._validate_operand(__a )
return self.tuple < other.tuple
def __hash__( self : int ) -> Optional[int]:
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def snake_case__ ( cls : List[Any] , __a : List[str] ) -> Union[str, Any]:
__UpperCAmelCase = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def snake_case__ ( self : Union[str, Any] ) -> str:
return self.version_str
def lowerCAmelCase ( UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = _VERSION_REG.match(UpperCamelCase__ )
if not res:
raise ValueError(f"""Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.""" )
return tuple(int(UpperCamelCase__ ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] )
def lowerCAmelCase ( UpperCamelCase__ : List[Any] ):
"""simple docstring"""
return ".".join(str(UpperCamelCase__ ) for v in version_tuple )
| 654 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__lowerCAmelCase : List[Any] = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
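# Note: at import time this module is swapped for a _LazyModule, so the heavy
# torch / speech backends are only imported when one of the names declared in
# _import_structure is first accessed.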
| 654 | 1 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : float , UpperCamelCase__ : float ):
"""simple docstring"""
if density <= 0:
raise ValueError('''Impossible fluid density''' )
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''' )
return (bulk_modulus / density) ** 0.5
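# Worked example (assumed reference values): for water, bulk_modulus is about
# 2.15e9 Pa and density about 1000 kg/m^3, so the function returns
# sqrt(2.15e9 / 1000) ~= 1466 m/s, close to the measured ~1480 m/s.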
if __name__ == "__main__":
import doctest
doctest.testmod()
| 654 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class A ( UpperCAmelCase ):
a_ = '''bert-generation'''
def __init__( self : str , __a : str=5_0_3_5_8 , __a : int=1_0_2_4 , __a : Optional[Any]=2_4 , __a : Any=1_6 , __a : int=4_0_9_6 , __a : Any="gelu" , __a : Union[str, Any]=0.1 , __a : Any=0.1 , __a : Union[str, Any]=5_1_2 , __a : int=0.0_2 , __a : str=1e-12 , __a : List[str]=0 , __a : Optional[int]=2 , __a : Tuple=1 , __a : str="absolute" , __a : Optional[Any]=True , **__a : Tuple , ) -> Any:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = use_cache
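# Note: the defaults above describe a BERT-large sized encoder (24 layers,
# 16 attention heads, hidden size 1024) with a 50,358-token vocabulary,
# presumably matching the published bert-generation checkpoints.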
| 654 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A ( UpperCAmelCase , unittest.TestCase ):
a_ = ShapEImgaImgPipeline
a_ = ['''image''']
a_ = ['''image''']
a_ = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
a_ = False
@property
def snake_case__ ( self : Tuple ) -> List[str]:
return 3_2
@property
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
return 3_2
@property
def snake_case__ ( self : Optional[int] ) -> str:
return self.time_input_dim * 4
@property
def snake_case__ ( self : List[Any] ) -> int:
return 8
@property
def snake_case__ ( self : Optional[int] ) -> str:
torch.manual_seed(0 )
__UpperCAmelCase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=6_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__UpperCAmelCase = CLIPVisionModel(__a )
return model
@property
def snake_case__ ( self : int ) -> Union[str, Any]:
__UpperCAmelCase = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=__a , do_normalize=__a , do_resize=__a , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=2_2_4 , )
return image_processor
@property
def snake_case__ ( self : List[str] ) -> Dict:
torch.manual_seed(0 )
__UpperCAmelCase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 1_6,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 3_2,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
__UpperCAmelCase = PriorTransformer(**__a )
return model
@property
def snake_case__ ( self : Dict ) -> Dict:
torch.manual_seed(0 )
__UpperCAmelCase = {
'''param_shapes''': (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 1_2,
'''background''': (
0.1,
0.1,
0.1,
),
}
__UpperCAmelCase = ShapERenderer(**__a )
return model
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
__UpperCAmelCase = self.dummy_prior
__UpperCAmelCase = self.dummy_image_encoder
__UpperCAmelCase = self.dummy_image_processor
__UpperCAmelCase = self.dummy_renderer
__UpperCAmelCase = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1_0_2_4 , prediction_type='''sample''' , use_karras_sigmas=__a , clip_sample=__a , clip_sample_range=1.0 , )
__UpperCAmelCase = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def snake_case__ ( self : Tuple , __a : Dict , __a : Union[str, Any]=0 ) -> List[str]:
__UpperCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__a ) ).to(__a )
if str(__a ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(__a )
else:
__UpperCAmelCase = torch.Generator(device=__a ).manual_seed(__a )
__UpperCAmelCase = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 3_2,
'''output_type''': '''np''',
}
return inputs
def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase = '''cpu'''
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = self.pipeline_class(**__a )
__UpperCAmelCase = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__UpperCAmelCase = pipe(**self.get_dummy_inputs(__a ) )
__UpperCAmelCase = output.images[0]
__UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
__UpperCAmelCase = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__ ( self : str ) -> List[str]:
        # NOTE: larger batch sizes cause this test to time out, so only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def snake_case__ ( self : Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase = torch_device == '''cpu'''
__UpperCAmelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__a , relax_max_difference=__a , )
def snake_case__ ( self : Dict ) -> List[str]:
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = self.pipeline_class(**__a )
__UpperCAmelCase = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__UpperCAmelCase = 1
__UpperCAmelCase = 2
__UpperCAmelCase = self.get_dummy_inputs(__a )
for key in inputs.keys():
if key in self.batch_params:
__UpperCAmelCase = batch_size * [inputs[key]]
__UpperCAmelCase = pipe(**__a , num_images_per_prompt=__a )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def snake_case__ ( self : Any ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self : Dict ) -> Any:
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
__UpperCAmelCase = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
__UpperCAmelCase = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__UpperCAmelCase = torch.Generator(device=__a ).manual_seed(0 )
__UpperCAmelCase = pipe(
__a , generator=__a , guidance_scale=3.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type='''np''' , ).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(__a , __a )
| 654 |
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
__lowerCAmelCase : str = 299_792_458
# Symbols
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = symbols("ct x y z")
def lowerCAmelCase ( UpperCamelCase__ : float ):
"""simple docstring"""
if velocity > c:
raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError('''Speed must be greater than or equal to 1!''' )
return velocity / c
def lowerCAmelCase ( UpperCamelCase__ : float ):
"""simple docstring"""
return 1 / sqrt(1 - beta(UpperCamelCase__ ) ** 2 )
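# Sanity check: since beta = v / c, gamma = 1 / sqrt(1 - beta**2); at v = 0.5c
# this gives 1 / sqrt(0.75) ~= 1.1547, and gamma grows without bound as v -> c.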
def lowerCAmelCase ( UpperCamelCase__ : float ):
"""simple docstring"""
return np.array(
[
[gamma(UpperCamelCase__ ), -gamma(UpperCamelCase__ ) * beta(UpperCamelCase__ ), 0, 0],
[-gamma(UpperCamelCase__ ) * beta(UpperCamelCase__ ), gamma(UpperCamelCase__ ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def lowerCAmelCase ( UpperCamelCase__ : float , UpperCamelCase__ : np.ndarray | None = None ):
"""simple docstring"""
# Ensure event is not empty
if event is None:
__UpperCAmelCase = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(UpperCamelCase__ ) @ event
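# Intended behaviour: with event=None a symbolic four-vector [ct, x, y, z] is
# boosted, which is what the __main__ demo below prints before substituting
# numerical values via sub_dict.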
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
__lowerCAmelCase : Dict = transform(29_979_245)
print("Example of four vector: ")
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
__lowerCAmelCase : Union[str, Any] = {ct: c, x: 1, y: 1, z: 1}
__lowerCAmelCase : Optional[int] = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
| 654 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class A ( UpperCAmelCase , unittest.TestCase ):
a_ = BioGptTokenizer
a_ = False
def snake_case__ ( self : Any ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
__UpperCAmelCase = dict(zip(__a , range(len(__a ) ) ) )
__UpperCAmelCase = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
__UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(__a ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(__a ) )
def snake_case__ ( self : Dict , __a : Any ) -> List[Any]:
__UpperCAmelCase = '''lower newer'''
__UpperCAmelCase = '''lower newer'''
return input_text, output_text
def snake_case__ ( self : Any ) -> Tuple:
__UpperCAmelCase = BioGptTokenizer(self.vocab_file , self.merges_file )
__UpperCAmelCase = '''lower'''
__UpperCAmelCase = ['''low''', '''er</w>''']
__UpperCAmelCase = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
__UpperCAmelCase = tokens + ['''<unk>''']
__UpperCAmelCase = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
@slow
def snake_case__ ( self : List[Any] ) -> List[str]:
__UpperCAmelCase = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
__UpperCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__a )
__UpperCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__a )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(__a )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(__a , __a )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 654 |
'''simple docstring'''
import heapq
import sys
import numpy as np
__lowerCAmelCase : Any = tuple[int, int]
class A :
def __init__( self : Optional[int] ) -> int:
__UpperCAmelCase = []
__UpperCAmelCase = set()
def snake_case__ ( self : Optional[Any] ) -> List[Any]:
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
def snake_case__ ( self : Dict ) -> Optional[int]:
return len(self.elements ) == 0
def snake_case__ ( self : Optional[int] , __a : Optional[Any] , __a : Dict ) -> Optional[Any]:
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(__a )
else:
# update
# print("update", item)
__UpperCAmelCase = []
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def snake_case__ ( self : int , __a : Any ) -> int:
if item in self.set:
self.set.remove(__a )
__UpperCAmelCase = []
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def snake_case__ ( self : List[str] ) -> Dict:
return self.elements[0][1]
def snake_case__ ( self : Any ) -> List[str]:
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
self.set.remove(__a )
return (priority, item)
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
    # Euclidean distance
__UpperCAmelCase = np.array(UpperCamelCase__ )
__UpperCAmelCase = np.array(UpperCamelCase__ )
return np.linalg.norm(a - b )
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# integer division by time variable
return consistent_heuristic(UpperCamelCase__ , UpperCamelCase__ ) // t
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
    # Manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : int , UpperCamelCase__ : TPos , UpperCamelCase__ : dict[TPos, float] ):
"""simple docstring"""
__UpperCAmelCase = g_function[start] + Wa * heuristics[i](UpperCamelCase__ , UpperCamelCase__ )
return ans
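# Weighted A* priority: key(s, i) = g(s) + Wa * h_i(s). Inflating the i-th
# heuristic by Wa >= 1 trades optimality of the found path for search speed.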
def lowerCAmelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase = np.chararray((n, n) )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
__UpperCAmelCase = '''*'''
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
if (j, (n - 1) - i) in blocks:
__UpperCAmelCase = '''#'''
__UpperCAmelCase = '''-'''
__UpperCAmelCase = back_pointer[goal]
while x != start:
((__UpperCAmelCase) , (__UpperCAmelCase)) = x
# print(x)
__UpperCAmelCase = '''-'''
__UpperCAmelCase = back_pointer[x]
__UpperCAmelCase = '''-'''
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
__UpperCAmelCase = back_pointer[goal]
while x != start:
print(UpperCamelCase__ , end=''' ''' )
__UpperCAmelCase = back_pointer[x]
print(UpperCamelCase__ )
sys.exit()
def lowerCAmelCase ( UpperCamelCase__ : TPos ):
"""simple docstring"""
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , ):
"""simple docstring"""
for itera in range(UpperCamelCase__ ):
open_list[itera].remove_element(UpperCamelCase__ )
# print("s", s)
# print("j", j)
((__UpperCAmelCase) , (__UpperCAmelCase)) = s
__UpperCAmelCase = (x - 1, y)
__UpperCAmelCase = (x + 1, y)
__UpperCAmelCase = (x, y + 1)
__UpperCAmelCase = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(UpperCamelCase__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(UpperCamelCase__ )
__UpperCAmelCase = -1
__UpperCAmelCase = float('''inf''' )
if valid(UpperCamelCase__ ) and g_function[neighbours] > g_function[s] + 1:
__UpperCAmelCase = g_function[s] + 1
__UpperCAmelCase = s
if neighbours not in close_list_anchor:
open_list[0].put(UpperCamelCase__ , key(UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ) )
if neighbours not in close_list_inad:
for var in range(1 , UpperCamelCase__ ):
if key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) <= Wa * key(
UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ):
open_list[j].put(
UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
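# Multi-Heuristic A* expansion rule: an improved successor always enters the
# anchor queue (index 0) under the consistent heuristic, and enters an
# inadmissible queue j only while key_j stays within a factor Wa of the
# anchor key, which bounds the suboptimality of the returned path.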
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
__lowerCAmelCase : Optional[Any] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
__lowerCAmelCase : List[Any] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
__lowerCAmelCase : Dict = make_common_ground()
__lowerCAmelCase : int = blocks_blk
# hyper parameters
__lowerCAmelCase : Dict = 1
__lowerCAmelCase : List[str] = 1
__lowerCAmelCase : Union[str, Any] = 20
__lowerCAmelCase : Any = 3 # one consistent heuristic and two inconsistent ones
# start and end destination
__lowerCAmelCase : Optional[Any] = (0, 0)
__lowerCAmelCase : Any = (n - 1, n - 1)
__lowerCAmelCase : Optional[int] = 1
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = {start: 0, goal: float('''inf''' )}
__UpperCAmelCase = {start: -1, goal: -1}
__UpperCAmelCase = []
__UpperCAmelCase = set()
for i in range(UpperCamelCase__ ):
open_list.append(PriorityQueue() )
open_list[i].put(UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
__UpperCAmelCase = []
__UpperCAmelCase = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , UpperCamelCase__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
__UpperCAmelCase , __UpperCAmelCase = open_list[i].top_show()
visited.add(UpperCamelCase__ )
expand_state(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
close_list_inad.append(UpperCamelCase__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
__UpperCAmelCase = open_list[0].top_show()
visited.add(UpperCamelCase__ )
expand_state(
UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
close_list_anchor.append(UpperCamelCase__ )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(UpperCamelCase__ ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 654 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 654 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
__lowerCAmelCase : List[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__lowerCAmelCase : str = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__lowerCAmelCase : int = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
__lowerCAmelCase : List[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
__lowerCAmelCase : List[str] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
__lowerCAmelCase : Optional[int] = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , UpperCamelCase__ )
return [m.group(0 ) for m in matches]
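# Example: camel_case_split("TFBertForMaskedLM") -> ["TF", "Bert", "For", "Masked", "LM"]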
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__UpperCAmelCase = {
config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
    # Let's loop through all transformers objects (once) and check whether each model is supported by a given backend.
for attr_name in dir(UpperCamelCase__ ):
__UpperCAmelCase = None
if _re_tf_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = tf_models
__UpperCAmelCase = _re_tf_models.match(UpperCamelCase__ ).groups()[0]
elif _re_flax_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = flax_models
__UpperCAmelCase = _re_flax_models.match(UpperCamelCase__ ).groups()[0]
elif _re_pt_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = pt_models
__UpperCAmelCase = _re_pt_models.match(UpperCamelCase__ ).groups()[0]
if lookup_dict is not None:
while len(UpperCamelCase__ ) > 0:
if attr_name in model_prefix_to_model_type:
__UpperCAmelCase = True
break
# Try again after removing the last word in the name
__UpperCAmelCase = ''''''.join(camel_case_split(UpperCamelCase__ )[:-1] )
__UpperCAmelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
__UpperCAmelCase = list(UpperCamelCase__ )
all_models.sort()
__UpperCAmelCase = {'''model_type''': all_models}
__UpperCAmelCase = [pt_models[t] for t in all_models]
__UpperCAmelCase = [tf_models[t] for t in all_models]
__UpperCAmelCase = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure each model type is paired with the right processor class.
__UpperCAmelCase = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
__UpperCAmelCase = '''AutoProcessor'''
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
__UpperCAmelCase = '''AutoTokenizer'''
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
__UpperCAmelCase = '''AutoFeatureExtractor'''
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
__UpperCAmelCase = '''AutoTokenizer'''
__UpperCAmelCase = [processors[t] for t in all_models]
return pd.DataFrame(UpperCamelCase__ )
def lowerCAmelCase ( UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
__UpperCAmelCase = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
__UpperCAmelCase = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
# The type of pipeline may not exist in this framework
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
continue
# First extract all model_names
__UpperCAmelCase = []
for name in getattr(UpperCamelCase__ , UpperCamelCase__ ).values():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
model_names.append(UpperCamelCase__ )
else:
model_names.extend(list(UpperCamelCase__ ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ):
"""simple docstring"""
__UpperCAmelCase = get_frameworks_table()
__UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ )
__UpperCAmelCase = hf_hub_download(
'''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=UpperCamelCase__ )
__UpperCAmelCase = Dataset.from_json(UpperCamelCase__ )
__UpperCAmelCase = {
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(UpperCamelCase__ ) )
}
__UpperCAmelCase = update_pipeline_and_auto_class_table(UpperCamelCase__ )
    # Sort the model classes to avoid nondeterministic updates that would create false update commits.
__UpperCAmelCase = sorted(table.keys() )
__UpperCAmelCase = pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
__UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(UpperCamelCase__ , '''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(UpperCamelCase__ , '''pipeline_tags.json''' ) )
if commit_sha is not None:
__UpperCAmelCase = (
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
__UpperCAmelCase = '''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' , folder_path=UpperCamelCase__ , repo_type='''dataset''' , token=UpperCamelCase__ , commit_message=UpperCamelCase__ , )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
__UpperCAmelCase = transformers_module.pipelines.SUPPORTED_TASKS
__UpperCAmelCase = []
for key in pipeline_tasks:
if key not in in_table:
__UpperCAmelCase = pipeline_tasks[key]['''pt''']
if isinstance(UpperCamelCase__ , (list, tuple) ):
__UpperCAmelCase = model[0]
__UpperCAmelCase = model.__name__
if model not in in_table.values():
missing.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
__UpperCAmelCase = ''', '''.join(UpperCamelCase__ )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
__lowerCAmelCase : Tuple = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 654 | 1 |
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__lowerCAmelCase : List[str] = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
__lowerCAmelCase : Optional[Any] = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
__lowerCAmelCase : int = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def snake_case__ ( self : List[Any] ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def snake_case__ ( self : str , __a : List[Any] , __a : Union[str, Any] , __a : Union[str, Any]=None , __a : Optional[Any]=None , __a : Dict=None , __a : Dict=None , __a : int="auto" , __a : List[Any]=-1 , __a : Any=0.9 , __a : Tuple=5 , __a : str=5_0_0 , __a : int="gpt2-large" , __a : List[Any]=-1 , __a : Optional[int]=1_0_2_4 , __a : Tuple=2_5 , __a : List[str]=5 , __a : int=True , __a : str=2_5 , ) -> List[Any]:
__UpperCAmelCase = compute_mauve(
p_text=__a , q_text=__a , p_features=__a , q_features=__a , p_tokens=__a , q_tokens=__a , num_buckets=__a , pca_max_data=__a , kmeans_explained_var=__a , kmeans_num_redo=__a , kmeans_max_iter=__a , featurize_model_name=__a , device_id=__a , max_text_length=__a , divergence_curve_discretization_size=__a , mauve_scaling_factor=__a , verbose=__a , seed=__a , )
return out
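# Under the hood compute_mauve featurizes both text lists (gpt2-large by
# default), quantizes the embeddings into num_buckets histograms via k-means,
# and scores the divergence frontier between them; see _KWARGS_DESCRIPTION
# above for the full argument and return-value reference.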
| 654 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__lowerCAmelCase : Optional[int] = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class A ( unittest.TestCase ):
def snake_case__ ( self : Any , __a : str , __a : bool , __a : str = None , __a : list = None ) -> Tuple:
__UpperCAmelCase = None
__UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
__UpperCAmelCase = os.path.abspath('''examples''' )
for item in os.listdir(__a ):
if item not in EXCLUDE_EXAMPLES:
__UpperCAmelCase = os.path.join(__a , __a )
if os.path.isfile(__a ) and ".py" in item_path:
with self.subTest(
tested_script=__a , feature_script=__a , tested_section='''main()''' if parser_only else '''training_function()''' , ):
__UpperCAmelCase = compare_against_test(
os.path.join(__a , __a ) , __a , __a , __a )
__UpperCAmelCase = '''\n'''.join(__a )
if special_strings is not None:
for string in special_strings:
__UpperCAmelCase = diff.replace(__a , '''''' )
self.assertEqual(__a , '''''' )
def snake_case__ ( self : Optional[Any] ) -> str:
self.one_complete_example('''complete_nlp_example.py''' , __a )
self.one_complete_example('''complete_nlp_example.py''' , __a )
def snake_case__ ( self : List[str] ) -> Tuple:
__UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
__UpperCAmelCase = [
''' ''' * 1_6 + '''{\n\n''',
''' ''' * 2_0 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 2_0 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 2_0 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 2_0 + '''"epoch": epoch,\n\n''',
''' ''' * 1_6 + '''},\n\n''',
''' ''' * 1_6 + '''step=epoch,\n''',
''' ''' * 1_2,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a )
self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class A ( UpperCAmelCase ):
a_ = False
@classmethod
def snake_case__ ( cls : Tuple ) -> str:
super().setUpClass()
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
__UpperCAmelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case__ ( cls : Dict ) -> int:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def snake_case__ ( self : Tuple ) -> Dict:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def snake_case__ ( self : str ) -> int:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def snake_case__ ( self : Any ) -> Any:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
self.assertNotIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
def snake_case__ ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
if torch.cuda.is_available():
__UpperCAmelCase = torch.cuda.device_count()
else:
__UpperCAmelCase = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
else:
self.assertIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
@slow
def snake_case__ ( self : Any ) -> Optional[Any]:
__UpperCAmelCase = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
__UpperCAmelCase = re.findall('''({.+})''' , __a )
__UpperCAmelCase = [r for r in results if '''accuracy''' in r][-1]
__UpperCAmelCase = ast.literal_eval(__a )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def snake_case__ ( self : Dict ) -> int:
__UpperCAmelCase = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
__UpperCAmelCase = f"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__a , '''tracking''' ) ) )
def snake_case__ ( self : Optional[int] ) -> List[Any]:
__UpperCAmelCase = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
__UpperCAmelCase = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 654 | 1 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 654 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__lowerCAmelCase : Any = ""
__lowerCAmelCase : int = ""
__lowerCAmelCase : Union[str, Any] = ""
__lowerCAmelCase : Any = 1 # (0 is vertical, 1 is horizontal)
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase = get_dataset(UpperCamelCase__ , UpperCamelCase__ )
print('''Processing...''' )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = update_image_and_anno(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for index, image in enumerate(UpperCamelCase__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__UpperCAmelCase = random_chars(3_2 )
__UpperCAmelCase = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
__UpperCAmelCase = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(f"""/{file_root}.jpg""" , UpperCamelCase__ , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
print(f"""Success {index+1}/{len(UpperCamelCase__ )} with {file_name}""" )
__UpperCAmelCase = []
for anno in new_annos[index]:
__UpperCAmelCase = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(UpperCamelCase__ )
with open(f"""/{file_root}.txt""" , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase = []
__UpperCAmelCase = []
for label_file in glob.glob(os.path.join(UpperCamelCase__ , '''*.txt''' ) ):
__UpperCAmelCase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(UpperCamelCase__ ) as in_file:
__UpperCAmelCase = in_file.readlines()
__UpperCAmelCase = os.path.join(UpperCamelCase__ , f"""{label_name}.jpg""" )
__UpperCAmelCase = []
for obj_list in obj_lists:
__UpperCAmelCase = obj_list.rstrip('''\n''' ).split(''' ''' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(UpperCamelCase__ )
labels.append(UpperCamelCase__ )
return img_paths, labels
def lowerCAmelCase ( UpperCamelCase__ : list , UpperCamelCase__ : list , UpperCamelCase__ : int = 1 ):
"""simple docstring"""
__UpperCAmelCase = []
__UpperCAmelCase = []
__UpperCAmelCase = []
for idx in range(len(UpperCamelCase__ ) ):
__UpperCAmelCase = []
__UpperCAmelCase = img_list[idx]
path_list.append(UpperCamelCase__ )
__UpperCAmelCase = anno_list[idx]
__UpperCAmelCase = cva.imread(UpperCamelCase__ )
if flip_type == 1:
__UpperCAmelCase = cva.flip(UpperCamelCase__ , UpperCamelCase__ )
for bbox in img_annos:
__UpperCAmelCase = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__UpperCAmelCase = cva.flip(UpperCamelCase__ , UpperCamelCase__ )
for bbox in img_annos:
__UpperCAmelCase = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(UpperCamelCase__ )
new_imgs_list.append(UpperCamelCase__ )
return new_imgs_list, new_annos_lists, path_list
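# Note: boxes are stored in normalized YOLO format (class, x_center, y_center,
# width, height), so a horizontal flip only needs x_center -> 1 - x_center and
# a vertical flip only needs y_center -> 1 - y_center, as done above.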
def lowerCAmelCase ( UpperCamelCase__ : int = 3_2 ):
"""simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
__UpperCAmelCase = ascii_lowercase + digits
return "".join(random.choice(UpperCamelCase__ ) for _ in range(UpperCamelCase__ ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 654 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class A :
a_ = PegasusConfig
a_ = {}
a_ = '''gelu'''
def __init__( self : Tuple , __a : int , __a : List[str]=1_3 , __a : Dict=7 , __a : Dict=True , __a : Dict=False , __a : Dict=9_9 , __a : Tuple=3_2 , __a : Optional[Any]=2 , __a : Optional[int]=4 , __a : str=3_7 , __a : str=0.1 , __a : Optional[Any]=0.1 , __a : List[Any]=4_0 , __a : str=2 , __a : Union[str, Any]=1 , __a : int=0 , ) -> List[str]:
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = seq_length
__UpperCAmelCase = is_training
__UpperCAmelCase = use_labels
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = eos_token_id
__UpperCAmelCase = pad_token_id
__UpperCAmelCase = bos_token_id
def snake_case__ ( self : Optional[int] ) -> str:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__UpperCAmelCase = prepare_pegasus_inputs_dict(__a , __a , __a )
return config, inputs_dict
def snake_case__ ( self : Optional[int] , __a : Optional[int] , __a : List[Any] ) -> int:
__UpperCAmelCase = TFPegasusModel(config=__a ).get_decoder()
__UpperCAmelCase = inputs_dict['''input_ids''']
__UpperCAmelCase = input_ids[:1, :]
__UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :]
__UpperCAmelCase = inputs_dict['''head_mask''']
__UpperCAmelCase = 1
# first forward pass
__UpperCAmelCase = model(__a , attention_mask=__a , head_mask=__a , use_cache=__a )
__UpperCAmelCase , __UpperCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
__UpperCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
__UpperCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__UpperCAmelCase = model(__a , attention_mask=__a )[0]
__UpperCAmelCase = model(__a , attention_mask=__a , past_key_values=__a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__UpperCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
__UpperCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1e-3 )
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : str=None , UpperCamelCase__ : Optional[int]=None , ):
"""simple docstring"""
if attention_mask is None:
__UpperCAmelCase = tf.cast(tf.math.not_equal(UpperCamelCase__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__UpperCAmelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class A ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
a_ = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
a_ = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
a_ = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
a_ = True
a_ = False
a_ = False
def snake_case__ ( self : int ) -> Optional[Any]:
__UpperCAmelCase = TFPegasusModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=__a )
def snake_case__ ( self : Tuple ) -> Optional[int]:
self.config_tester.run_common_tests()
def snake_case__ ( self : Dict ) -> int:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a )
@require_sentencepiece
@require_tokenizers
@require_tf
class A ( unittest.TestCase ):
a_ = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
a_ = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
a_ = '''google/pegasus-xsum'''
@cached_property
def snake_case__ ( self : str ) -> Any:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def snake_case__ ( self : Dict ) -> Tuple:
__UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def snake_case__ ( self : Optional[int] , **__a : Optional[int] ) -> Optional[Any]:
__UpperCAmelCase = self.translate_src_text(**__a )
assert self.expected_text == generated_words
def snake_case__ ( self : str , **__a : List[str] ) -> str:
__UpperCAmelCase = self.tokenizer(self.src_text , **__a , padding=__a , return_tensors='''tf''' )
__UpperCAmelCase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__a , )
__UpperCAmelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__a )
return generated_words
@slow
def snake_case__ ( self : int ) -> Tuple:
self._assert_generated_batch_equal_expected()
| 654 |
'''simple docstring'''
from pathlib import Path
import fire
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = Path(UpperCamelCase__ )
__UpperCAmelCase = Path(UpperCamelCase__ )
dest_dir.mkdir(exist_ok=UpperCamelCase__ )
for path in src_dir.iterdir():
__UpperCAmelCase = [x.rstrip() for x in list(path.open().readlines() )][:n]
__UpperCAmelCase = dest_dir.joinpath(path.name )
print(UpperCamelCase__ )
dest_path.open('''w''' ).write('''\n'''.join(UpperCamelCase__ ) )
if __name__ == "__main__":
fire.Fire(minify)
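# Aside: example invocation (hypothetical paths), assuming the truncation helper
# above is actually named `minify` as the fire.Fire call expects:
#   python minify.py /path/to/src_dir /path/to/dest_dir 100
# which writes the first 100 lines of every file in src_dir into dest_dir.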
| 654 | 1 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__lowerCAmelCase : List[str] = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def lowerCAmelCase ( UpperCamelCase__ : Tuple ):
"""simple docstring"""
__UpperCAmelCase = test_results.split(''' ''' )
__UpperCAmelCase = 0
__UpperCAmelCase = 0
# When the output is short enough, it is surrounded by = signs: "== OUTPUT ==".
# When it is too long, those signs are not present.
__UpperCAmelCase = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
for i, expression in enumerate(UpperCamelCase__ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
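# Aside: on a pytest-style summary such as "== 2 failed, 10 passed in 3.50s ==",
# the parser above yields failed=2 and success=10 from the tokens preceding
# "failed"/"passed", and keeps "3.50s" (the token before the trailing "==") as
# the time spent.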
def lowerCAmelCase ( UpperCamelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase = {}
__UpperCAmelCase = None
__UpperCAmelCase = False
for line in failures_short_lines.split('''\n''' ):
if re.search(R'''_ \[doctest\]''' , UpperCamelCase__ ):
__UpperCAmelCase = True
__UpperCAmelCase = line.split(''' ''' )[2]
elif in_error and not line.split(''' ''' )[0].isdigit():
__UpperCAmelCase = line
__UpperCAmelCase = False
return failures
class A :
def __init__( self : Union[str, Any] , __a : str , __a : Dict ) -> str:
__UpperCAmelCase = title
__UpperCAmelCase = doc_test_results['''time_spent'''].split(''',''' )[0]
__UpperCAmelCase = doc_test_results['''success''']
__UpperCAmelCase = doc_test_results['''failures''']
__UpperCAmelCase = self.n_success + self.n_failures
# Failures and success of the modeling tests
__UpperCAmelCase = doc_test_results
@property
def snake_case__ ( self : Optional[int] ) -> str:
__UpperCAmelCase = [self._time_spent]
__UpperCAmelCase = 0
for time in time_spent:
__UpperCAmelCase = time.split(''':''' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(__a ) == 1:
__UpperCAmelCase = [0, 0, time_parts[0]]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3_6_0_0 + minutes * 6_0 + seconds
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = total_secs // 3_6_0_0, (total_secs % 3_6_0_0) // 6_0, total_secs % 6_0
return f"""{int(__a )}h{int(__a )}m{int(__a )}s"""
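# Aside: the property above sums "h:m:s"-style chunks into seconds before
# re-formatting, e.g. ["1:02:03", "0:00:30"] totals 3723 + 30 = 3753 seconds,
# which renders as "1h2m33s".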
@property
def snake_case__ ( self : Optional[int] ) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def snake_case__ ( self : List[str] ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
def snake_case__ ( self : int ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
f""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
def snake_case__ ( self : Optional[int] ) -> Dict:
__UpperCAmelCase = 4_0
__UpperCAmelCase = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(__a , __a )}
__UpperCAmelCase = ''''''
for category, failures in category_failures.items():
if len(__a ) == 0:
continue
if report != "":
report += "\n\n"
report += f"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(__a )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
def snake_case__ ( self : str ) -> str:
__UpperCAmelCase = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(__a )
@staticmethod
def snake_case__ ( ) -> Dict:
__UpperCAmelCase = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': f"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
]
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(__a )} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text='''There was an issue running the tests.''' , blocks=__a , )
def snake_case__ ( self : int ) -> Union[str, Any]:
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
__UpperCAmelCase = f"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else '''All tests passed.'''
__UpperCAmelCase = client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , blocks=self.payload , text=__a , )
def snake_case__ ( self : Tuple , __a : Optional[Any] , __a : Union[str, Any] , __a : str , __a : List[Any] ) -> Tuple:
__UpperCAmelCase = ''''''
for key, value in failures.items():
__UpperCAmelCase = value[:2_0_0] + ''' [Truncated]''' if len(__a ) > 2_5_0 else value
failures_text += f"""*{key}*\n_{value}_\n\n"""
__UpperCAmelCase = job_name
__UpperCAmelCase = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
if job_link is not None:
__UpperCAmelCase = {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
'''url''': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def snake_case__ ( self : str ) -> List[str]:
if self.thread_ts is None:
raise ValueError('''Can only post reply if a post has been made.''' )
__UpperCAmelCase = self.doc_test_results.pop('''job_link''' )
self.doc_test_results.pop('''failures''' )
self.doc_test_results.pop('''success''' )
self.doc_test_results.pop('''time_spent''' )
__UpperCAmelCase = sorted(self.doc_test_results.items() , key=lambda __a : t[0] )
for job, job_result in sorted_dict:
if len(job_result['''failures'''] ):
__UpperCAmelCase = f"""*Num failures* :{len(job_result['failed'] )} \n"""
__UpperCAmelCase = job_result['''failures''']
__UpperCAmelCase = self.get_reply_blocks(__a , __a , __a , text=__a )
print('''Sending the following reply''' )
print(json.dumps({'''blocks''': blocks} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text=f"""Results for {job}""" , blocks=__a , thread_ts=self.thread_ts['''ts'''] , )
time.sleep(1 )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = os.environ['''GITHUB_RUN_ID''']
__UpperCAmelCase = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
__UpperCAmelCase = requests.get(UpperCamelCase__ ).json()
__UpperCAmelCase = {}
try:
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
__UpperCAmelCase = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
for i in range(UpperCamelCase__ ):
__UpperCAmelCase = requests.get(url + f"""&page={i + 2}""" ).json()
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return jobs
except Exception as e:
print('''Unknown error, could not fetch links.''' , UpperCamelCase__ )
return {}
def lowerCAmelCase ( UpperCamelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase = {}
if os.path.exists(UpperCamelCase__ ):
__UpperCAmelCase = os.listdir(UpperCamelCase__ )
for file in files:
try:
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , encoding='''utf-8''' ) as f:
__UpperCAmelCase = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"""Could not open {os.path.join(UpperCamelCase__ , UpperCamelCase__ )}.""" ) from e
return _artifact
def lowerCAmelCase ( ):
"""simple docstring"""
class A :
def __init__( self : Union[str, Any] , __a : str ) -> int:
__UpperCAmelCase = name
__UpperCAmelCase = []
def __str__( self : str ) -> List[str]:
return self.name
def snake_case__ ( self : Optional[Any] , __a : str ) -> List[Any]:
self.paths.append({'''name''': self.name, '''path''': path} )
__UpperCAmelCase = {}
__UpperCAmelCase = filter(os.path.isdir , os.listdir() )
for directory in directories:
__UpperCAmelCase = directory
if artifact_name not in _available_artifacts:
__UpperCAmelCase = Artifact(UpperCamelCase__ )
_available_artifacts[artifact_name].add_path(UpperCamelCase__ )
return _available_artifacts
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = get_job_links()
__lowerCAmelCase : Any = retrieve_available_artifacts()
__lowerCAmelCase : List[Any] = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__lowerCAmelCase : Dict = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__lowerCAmelCase : str = github_actions_job_links.get("run_doctests")
__lowerCAmelCase : Dict = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
__lowerCAmelCase : Tuple = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = handle_test_results(artifact["stats"])
__lowerCAmelCase : Dict = failed
__lowerCAmelCase : str = success
__lowerCAmelCase : Optional[Any] = time_spent[1:-1] + ", "
__lowerCAmelCase : Dict = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
__lowerCAmelCase : Any = line.replace("FAILED ", "")
__lowerCAmelCase : Optional[Any] = line.split()[0].replace("\n", "")
if "::" in line:
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = line.split("::")
else:
__lowerCAmelCase , __lowerCAmelCase : Any = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__lowerCAmelCase : Optional[Any] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
__lowerCAmelCase : Optional[int] = all_failures[test] if test in all_failures else "N/A"
__lowerCAmelCase : Union[str, Any] = failure
break
__lowerCAmelCase : int = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 654 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
__UpperCAmelCase = f"""Input value of [number={number}] must be an integer"""
raise TypeError(UpperCamelCase__ )
if number < 1:
__UpperCAmelCase = f"""Input value of [number={number}] must be > 0"""
raise ValueError(UpperCamelCase__ )
__UpperCAmelCase = 1
for i in range(1 , UpperCamelCase__ ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
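# Aside: the loop above implements the Catalan recurrence
# C(i) = C(i - 1) * (4 * i - 2) // (i + 1), so for number = 1..5 the function
# returns the Catalan numbers 1, 1, 2, 5, 14. A standalone check of the recurrence:
c, values = 1, [1]
for i in range(1, 5):
    c = c * (4 * i - 2) // (i + 1)
    values.append(c)
assert values == [1, 1, 2, 5, 14]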
| 654 | 1 |
'''simple docstring'''
class A :
def __init__( self : Optional[Any] , __a : int , __a : List[str]=None , __a : Optional[Any]=None ) -> Dict:
__UpperCAmelCase = data
__UpperCAmelCase = previous
__UpperCAmelCase = next_node
def __str__( self : Tuple ) -> str:
return f"""{self.data}"""
def snake_case__ ( self : Dict ) -> int:
return self.data
def snake_case__ ( self : List[Any] ) -> Any:
return self.next
def snake_case__ ( self : str ) -> Dict:
return self.previous
class A :
def __init__( self : List[str] , __a : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase = head
def __iter__( self : Tuple ) -> List[Any]:
return self
def snake_case__ ( self : List[str] ) -> List[Any]:
if not self.current:
raise StopIteration
else:
__UpperCAmelCase = self.current.get_data()
__UpperCAmelCase = self.current.get_next()
return value
class A :
def __init__( self : int ) -> int:
__UpperCAmelCase = None # First node in list
__UpperCAmelCase = None # Last node in list
def __str__( self : List[str] ) -> List[str]:
__UpperCAmelCase = self.head
__UpperCAmelCase = []
while current is not None:
nodes.append(current.get_data() )
__UpperCAmelCase = current.get_next()
return " ".join(str(__a ) for node in nodes )
def __contains__( self : Optional[Any] , __a : int ) -> Union[str, Any]:
__UpperCAmelCase = self.head
while current:
if current.get_data() == value:
return True
__UpperCAmelCase = current.get_next()
return False
def __iter__( self : Union[str, Any] ) -> int:
return LinkedListIterator(self.head )
def snake_case__ ( self : Tuple ) -> Any:
if self.head:
return self.head.get_data()
return None
def snake_case__ ( self : List[Any] ) -> List[str]:
if self.tail:
return self.tail.get_data()
return None
def snake_case__ ( self : Optional[int] , __a : Node ) -> None:
if self.head is None:
__UpperCAmelCase = node
__UpperCAmelCase = node
else:
self.insert_before_node(self.head , __a )
def snake_case__ ( self : List[str] , __a : Node ) -> None:
if self.head is None:
self.set_head(__a )
else:
self.insert_after_node(self.tail , __a )
def snake_case__ ( self : Dict , __a : int ) -> None:
__UpperCAmelCase = Node(__a )
if self.head is None:
self.set_head(__a )
else:
self.set_tail(__a )
def snake_case__ ( self : List[Any] , __a : Node , __a : Node ) -> None:
__UpperCAmelCase = node
__UpperCAmelCase = node.previous
if node.get_previous() is None:
__UpperCAmelCase = node_to_insert
else:
__UpperCAmelCase = node_to_insert
__UpperCAmelCase = node_to_insert
def snake_case__ ( self : int , __a : Node , __a : Node ) -> None:
__UpperCAmelCase = node
__UpperCAmelCase = node.next
if node.get_next() is None:
__UpperCAmelCase = node_to_insert
else:
__UpperCAmelCase = node_to_insert
__UpperCAmelCase = node_to_insert
def snake_case__ ( self : Optional[Any] , __a : int , __a : int ) -> None:
__UpperCAmelCase = 1
__UpperCAmelCase = Node(__a )
__UpperCAmelCase = self.head
while node:
if current_position == position:
self.insert_before_node(__a , __a )
return
current_position += 1
__UpperCAmelCase = node.next
self.insert_after_node(self.tail , __a )
def snake_case__ ( self : Optional[int] , __a : int ) -> Node:
__UpperCAmelCase = self.head
while node:
if node.get_data() == item:
return node
__UpperCAmelCase = node.get_next()
raise Exception('''Node not found''' )
def snake_case__ ( self : Tuple , __a : int ) -> int:
if (node := self.get_node(__a )) is not None:
if node == self.head:
__UpperCAmelCase = self.head.get_next()
if node == self.tail:
__UpperCAmelCase = self.tail.get_previous()
self.remove_node_pointers(__a )
@staticmethod
def snake_case__ ( __a : Node ) -> None:
if node.get_next():
__UpperCAmelCase = node.previous
if node.get_previous():
__UpperCAmelCase = node.next
__UpperCAmelCase = None
__UpperCAmelCase = None
def snake_case__ ( self : int ) -> Any:
return self.head is None
def lowerCAmelCase ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
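# Aside: the three classes above all share the obfuscated name "A", so here is a
# tiny self-contained sketch of the pointer surgery that remove_node_pointers
# performs — deleting a middle node re-links its neighbours around it:
class _Node:
    def __init__(self, data):
        self.data, self.previous, self.next = data, None, None

a, b, c = _Node(1), _Node(2), _Node(3)
a.next, b.previous, b.next, c.previous = b, a, c, b  # a <-> b <-> c
a.next, c.previous = b.next, b.previous              # unlink b
b.previous = b.next = None                           # clear the removed node's links
assert a.next is c and c.previous is a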
| 654 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def lowerCAmelCase ( ):
"""simple docstring"""
raise RuntimeError('''CUDA out of memory.''' )
class A ( nn.Module ):
def __init__( self : Optional[Any] ) -> int:
super().__init__()
__UpperCAmelCase = nn.Linear(3 , 4 )
__UpperCAmelCase = nn.BatchNormad(4 )
__UpperCAmelCase = nn.Linear(4 , 5 )
def snake_case__ ( self : List[str] , __a : Optional[int] ) -> Optional[int]:
return self.lineara(self.batchnorm(self.lineara(__a ) ) )
class A ( unittest.TestCase ):
def snake_case__ ( self : Optional[int] ) -> Any:
__UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : Union[str, Any] ):
nonlocal batch_sizes
batch_sizes.append(__a )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(__a , [1_2_8, 6_4, 3_2, 1_6, 8] )
def snake_case__ ( self : str ) -> int:
__UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : str , __a : Optional[int] ):
nonlocal batch_sizes
batch_sizes.append(__a )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
__UpperCAmelCase , __UpperCAmelCase = mock_training_loop_function('''hello''' )
self.assertListEqual(__a , [1_2_8, 6_4, 3_2, 1_6, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def snake_case__ ( self : Any ) -> int:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(__a : Optional[int] ):
pass
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def snake_case__ ( self : Any ) -> List[Any]:
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(__a : Dict ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def snake_case__ ( self : List[Any] ) -> List[str]:
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : str , __a : Union[str, Any] , __a : int ):
if batch_size != 8:
raise_fake_out_of_memory()
with self.assertRaises(__a ) as cm:
mock_training_loop_function(1_2_8 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(__a : Tuple ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def snake_case__ ( self : Any ) -> List[Any]:
__UpperCAmelCase = torch.cuda.memory_allocated()
__UpperCAmelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , __a )
__UpperCAmelCase = release_memory(__a )
self.assertEqual(torch.cuda.memory_allocated() , __a )
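# Aside: a minimal sketch of how the decorator under test is used in practice —
# the wrapped function receives the current batch size as its first argument and
# is retried with the value halved whenever it raises a CUDA out-of-memory error
# (find_executable_batch_size is already imported at the top of this file):
@find_executable_batch_size(starting_batch_size=128)
def _example_training_loop(batch_size):
    # build the dataloader / run the training step with `batch_size` here
    return batch_size

# _example_training_loop() is then called with *no* batch size argument;
# the decorator injects it and retries on OOM.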
| 654 | 1 |
'''simple docstring'''
__lowerCAmelCase : Dict = 0 # The first color of the flag.
__lowerCAmelCase : Optional[Any] = 1 # The second color of the flag.
__lowerCAmelCase : List[Any] = 2 # The third color of the flag.
__lowerCAmelCase : int = (red, white, blue)
def lowerCAmelCase ( UpperCamelCase__ : list ):
"""simple docstring"""
if not sequence:
return []
if len(UpperCamelCase__ ) == 1:
return list(UpperCamelCase__ )
__UpperCAmelCase = 0
__UpperCAmelCase = len(UpperCamelCase__ ) - 1
__UpperCAmelCase = 0
while mid <= high:
if sequence[mid] == colors[0]:
__UpperCAmelCase , __UpperCAmelCase = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
__UpperCAmelCase , __UpperCAmelCase = sequence[high], sequence[mid]
high -= 1
else:
__UpperCAmelCase = f"""The elements inside the sequence must contains only {colors} values"""
raise ValueError(UpperCamelCase__ )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : List[str] = input("Enter numbers separated by commas:\n").strip()
__lowerCAmelCase : int = [int(item.strip()) for item in user_input.split(",")]
print(F"""{dutch_national_flag_sort(unsorted)}""")
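# Aside: a worked example of the single three-pointer sweep above —
# dutch_national_flag_sort([2, 0, 1, 0, 2]) returns [0, 0, 1, 2, 2]: zeros are
# swapped behind `low`, ones simply advance `mid`, and twos are swapped past `high`.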
| 654 |
'''simple docstring'''
from __future__ import annotations
import math
def lowerCAmelCase ( UpperCamelCase__ : float , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = u
for i in range(1 , UpperCamelCase__ ):
__UpperCAmelCase = temp * (u - i)
return temp
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = int(input('''enter the numbers of values: ''' ) )
__UpperCAmelCase = []
for _ in range(UpperCamelCase__ ):
y.append([] )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
y[i].append(UpperCamelCase__ )
__UpperCAmelCase = 0
print('''enter the values of parameters in a list: ''' )
__UpperCAmelCase = list(map(UpperCamelCase__ , input().split() ) )
print('''enter the values of corresponding parameters: ''' )
for i in range(UpperCamelCase__ ):
__UpperCAmelCase = float(input() )
__UpperCAmelCase = int(input('''enter the value to interpolate: ''' ) )
__UpperCAmelCase = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , UpperCamelCase__ ):
for j in range(n - i ):
__UpperCAmelCase = y[j + 1][i - 1] - y[j][i - 1]
__UpperCAmelCase = y[0][0]
for i in range(1 , UpperCamelCase__ ):
summ += (ucal(UpperCamelCase__ , UpperCamelCase__ ) * y[0][i]) / math.factorial(UpperCamelCase__ )
print(f"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
| 654 | 1 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__lowerCAmelCase : List[Any] = "hf-internal-testing/tiny-random-bert"
__lowerCAmelCase : int = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
__lowerCAmelCase : List[str] = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class A ( unittest.TestCase ):
def snake_case__ ( self : List[str] ) -> Union[str, Any]:
__UpperCAmelCase = cached_file(__a , __a )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(__a ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(__a , __a ) ) )
with open(os.path.join(__a , '''refs''' , '''main''' ) ) as f:
__UpperCAmelCase = f.read()
self.assertEqual(__a , os.path.join(__a , '''snapshots''' , __a , __a ) )
self.assertTrue(os.path.isfile(__a ) )
# File is cached at the same place the second time.
__UpperCAmelCase = cached_file(__a , __a )
self.assertEqual(__a , __a )
# Using a specific revision to test the full commit hash.
__UpperCAmelCase = cached_file(__a , __a , revision='''9b8c223''' )
self.assertEqual(__a , os.path.join(__a , '''snapshots''' , __a , __a ) )
def snake_case__ ( self : Tuple ) -> List[Any]:
with self.assertRaisesRegex(__a , '''is not a valid model identifier''' ):
__UpperCAmelCase = cached_file('''tiny-random-bert''' , __a )
with self.assertRaisesRegex(__a , '''is not a valid git identifier''' ):
__UpperCAmelCase = cached_file(__a , __a , revision='''aaaa''' )
with self.assertRaisesRegex(__a , '''does not appear to have a file named''' ):
__UpperCAmelCase = cached_file(__a , '''conf''' )
def snake_case__ ( self : List[Any] ) -> Optional[int]:
with self.assertRaisesRegex(__a , '''does not appear to have a file named''' ):
__UpperCAmelCase = cached_file(__a , '''conf''' )
with open(os.path.join(__a , '''refs''' , '''main''' ) ) as f:
__UpperCAmelCase = f.read()
self.assertTrue(os.path.isfile(os.path.join(__a , '''.no_exist''' , __a , '''conf''' ) ) )
__UpperCAmelCase = cached_file(__a , '''conf''' , _raise_exceptions_for_missing_entries=__a )
self.assertIsNone(__a )
__UpperCAmelCase = cached_file(__a , '''conf''' , local_files_only=__a , _raise_exceptions_for_missing_entries=__a )
self.assertIsNone(__a )
__UpperCAmelCase = mock.Mock()
__UpperCAmelCase = 5_0_0
__UpperCAmelCase = {}
__UpperCAmelCase = HTTPError
__UpperCAmelCase = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=__a ) as mock_head:
__UpperCAmelCase = cached_file(__a , '''conf''' , _raise_exceptions_for_connection_errors=__a )
self.assertIsNone(__a )
# This check makes sure we did call the fake head request
mock_head.assert_called()
def snake_case__ ( self : Optional[Any] ) -> Optional[Any]:
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __a ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __a ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __a ) )
def snake_case__ ( self : Optional[int] ) -> int:
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(__a , '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''' , __a )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(__a , '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''' , __a , revision='''ahaha''' )
__UpperCAmelCase = get_file_from_repo('''bert-base-cased''' , __a )
# The name is the cached name which is not very easy to test, so instead we load the content.
__UpperCAmelCase = json.loads(open(__a , '''r''' ).read() )
self.assertEqual(config['''hidden_size'''] , 7_6_8 )
def snake_case__ ( self : Optional[int] ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase = Path(__a ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(__a , '''a.txt''' ) , str(__a ) )
self.assertIsNone(get_file_from_repo(__a , '''b.txt''' ) )
| 654 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger(__name__)
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
__UpperCAmelCase = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""" )
__UpperCAmelCase = in_proj_weight[
: encoder_config.hidden_size, :
]
__UpperCAmelCase = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
__UpperCAmelCase = in_proj_weight[
-encoder_config.hidden_size :, :
]
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = dct.pop(UpperCamelCase__ )
__UpperCAmelCase = val
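# Aside: the helper above is just a keyed move — pop the tensor stored under the
# old name and re-insert it under the new one, e.g. {"old": t} becomes {"new": t}.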
def lowerCAmelCase ( UpperCamelCase__ : Dict ):
"""simple docstring"""
if "handwritten" in checkpoint_url:
__UpperCAmelCase = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCAmelCase = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
__UpperCAmelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert('''RGB''' )
return im
@torch.no_grad()
def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase = ViTConfig(image_size=3_8_4 , qkv_bias=UpperCamelCase__ )
__UpperCAmelCase = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
__UpperCAmelCase = 7_6_8
elif "large" in checkpoint_url:
# use ViT-large encoder
__UpperCAmelCase = 1_0_2_4
__UpperCAmelCase = 4_0_9_6
__UpperCAmelCase = 2_4
__UpperCAmelCase = 1_6
__UpperCAmelCase = 1_0_2_4
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
# the large-printed + stage1 checkpoints use sinusoidal position embeddings, with no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCAmelCase = False
__UpperCAmelCase = '''relu'''
__UpperCAmelCase = 1_0_2_4
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = False
# load HuggingFace model
__UpperCAmelCase = ViTModel(UpperCamelCase__ , add_pooling_layer=UpperCamelCase__ )
__UpperCAmelCase = TrOCRForCausalLM(UpperCamelCase__ )
__UpperCAmelCase = VisionEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
model.eval()
# load state_dict of original model, rename some keys
__UpperCAmelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='''cpu''' , check_hash=UpperCamelCase__ )['''model''']
__UpperCAmelCase = create_rename_keys(UpperCamelCase__ , UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
__UpperCAmelCase = state_dict.pop(UpperCamelCase__ )
if key.startswith('''decoder''' ) and "output_projection" not in key:
__UpperCAmelCase = val
else:
__UpperCAmelCase = val
# load state dict
model.load_state_dict(UpperCamelCase__ )
# Check outputs on an image
__UpperCAmelCase = ViTImageProcessor(size=encoder_config.image_size )
__UpperCAmelCase = RobertaTokenizer.from_pretrained('''roberta-large''' )
__UpperCAmelCase = TrOCRProcessor(UpperCamelCase__ , UpperCamelCase__ )
__UpperCAmelCase = processor(images=prepare_img(UpperCamelCase__ ) , return_tensors='''pt''' ).pixel_values
# verify logits
__UpperCAmelCase = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
__UpperCAmelCase = model(pixel_values=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ )
__UpperCAmelCase = outputs.logits
__UpperCAmelCase = torch.Size([1, 1, 5_0_2_6_5] )
if "trocr-base-handwritten" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
elif "trocr-large-handwritten" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
elif "trocr-base-printed" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
elif "trocr-large-printed" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :1_0] , UpperCamelCase__ , atol=1E-3 ), "First elements of logits not as expected"
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__lowerCAmelCase : Optional[int] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 654 | 1 |
'''simple docstring'''
import qiskit
def lowerCAmelCase ( UpperCamelCase__ : int , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = qiskit.Aer.get_backend('''aer_simulator''' )
# Create a Quantum Circuit acting on the q register
__UpperCAmelCase = qiskit.QuantumCircuit(UpperCamelCase__ , UpperCamelCase__ )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
__UpperCAmelCase = qiskit.execute(UpperCamelCase__ , UpperCamelCase__ , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(UpperCamelCase__ )
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
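# Aside: the returned histogram maps bitstrings to shot counts; since the circuit
# above applies no gates before measuring, every shot reads |0>, so the expected
# output is {"0": 1000}.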
| 654 |
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class A ( unittest.TestCase ):
def snake_case__ ( self : List[Any] , __a : List[str] , __a : Optional[Any] ) -> List[Any]:
return f"""gaussian_noise_s={seed}_shape={'_'.join([str(__a ) for s in shape] )}.npy"""
def snake_case__ ( self : Dict ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def snake_case__ ( self : Optional[Any] , __a : Tuple=0 , __a : List[Any]=(4, 4, 6_4, 6_4) , __a : Optional[Any]=False ) -> Tuple:
__UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a )
return image
def snake_case__ ( self : int , __a : Optional[Any]=False , __a : Optional[Any]="CompVis/stable-diffusion-v1-4" ) -> Any:
__UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__UpperCAmelCase = '''bf16''' if fpaa else None
__UpperCAmelCase , __UpperCAmelCase = FlaxUNetaDConditionModel.from_pretrained(
__a , subfolder='''unet''' , dtype=__a , revision=__a )
return model, params
def snake_case__ ( self : str , __a : int=0 , __a : Tuple=(4, 7_7, 7_6_8) , __a : Optional[int]=False ) -> Union[str, Any]:
__UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a )
return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[1_7, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 1_0_0_0, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
def snake_case__ ( self : Tuple , __a : Tuple , __a : str , __a : Optional[Any] ) -> Any:
__UpperCAmelCase , __UpperCAmelCase = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__a )
__UpperCAmelCase = self.get_latents(__a , fpaa=__a )
__UpperCAmelCase = self.get_encoder_hidden_states(__a , fpaa=__a )
__UpperCAmelCase = model.apply(
{'''params''': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample
assert sample.shape == latents.shape
__UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__UpperCAmelCase = jnp.array(__a , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__a , __a , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[1_7, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 1_0_0_0, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
] )
def snake_case__ ( self : Optional[Any] , __a : Optional[int] , __a : Optional[Any] , __a : Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__a )
__UpperCAmelCase = self.get_latents(__a , shape=(4, 4, 9_6, 9_6) , fpaa=__a )
__UpperCAmelCase = self.get_encoder_hidden_states(__a , shape=(4, 7_7, 1_0_2_4) , fpaa=__a )
__UpperCAmelCase = model.apply(
{'''params''': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample
assert sample.shape == latents.shape
__UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__UpperCAmelCase = jnp.array(__a , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__a , __a , atol=1e-2 )
| 654 | 1 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__lowerCAmelCase : Union[str, Any] = True
except (ImportError, ModuleNotFoundError):
__lowerCAmelCase : List[Any] = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def lowerCAmelCase ( UpperCamelCase__ : str ):
"""simple docstring"""
UpperCamelCase__ = re.sub('''<n>''' , '''''' , UpperCamelCase__ ) # remove pegasus newline char (assign the result; it was previously discarded)
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(UpperCamelCase__ ) )
| 654 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__lowerCAmelCase : Optional[int] = "examples/"
__lowerCAmelCase : Dict = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__lowerCAmelCase : List[str] = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__lowerCAmelCase : int = "README.md"
def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple ):
"""simple docstring"""
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__UpperCAmelCase = f.read()
__UpperCAmelCase , __UpperCAmelCase = REPLACE_PATTERNS[pattern]
__UpperCAmelCase = replace.replace('''VERSION''' , UpperCamelCase__ )
__UpperCAmelCase = re_pattern.sub(UpperCamelCase__ , UpperCamelCase__ )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(UpperCamelCase__ )
def lowerCAmelCase ( UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
for folder, directories, fnames in os.walk(UpperCamelCase__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ , pattern='''examples''' )
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Any=False ):
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not patch:
update_version_in_examples(UpperCamelCase__ )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = '''🤗 Transformers currently provides the following architectures'''
__UpperCAmelCase = '''1. Want to contribute a new model?'''
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__UpperCAmelCase = f.readlines()
# Find the start of the list.
__UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
__UpperCAmelCase = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(UpperCamelCase__ )
def lowerCAmelCase ( ):
"""simple docstring"""
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
__UpperCAmelCase = f.read()
__UpperCAmelCase = REPLACE_PATTERNS['''init'''][0].search(UpperCamelCase__ ).groups()[0]
return packaging.version.parse(UpperCamelCase__ )
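# Aside: get_version above reads src/transformers/__init__.py and matches the
# `__version__ = "..."` line with the "init" pattern, so a file containing, say,
# __version__ = "4.28.0.dev0" yields packaging.version.Version("4.28.0.dev0").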
def lowerCAmelCase ( UpperCamelCase__ : Any=False ):
"""simple docstring"""
__UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
__UpperCAmelCase = default_version.base_version
elif patch:
__UpperCAmelCase = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
__UpperCAmelCase = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
__UpperCAmelCase = input(f"""Which version are you releasing? [{default_version}]""" )
if len(UpperCamelCase__ ) == 0:
__UpperCAmelCase = default_version
print(f"""Updating version to {version}.""" )
global_version_update(UpperCamelCase__ , patch=UpperCamelCase__ )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = get_version()
__UpperCAmelCase = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
__UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
__UpperCAmelCase = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(UpperCamelCase__ ) == 0:
__UpperCAmelCase = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(UpperCamelCase__ )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__lowerCAmelCase : Tuple = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 654 | 1 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : Any ):
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def lowerCAmelCase ( UpperCamelCase__ : dict[int, list[int]] ):
"""simple docstring"""
__UpperCAmelCase = 0
__UpperCAmelCase = len(UpperCamelCase__ ) # No of vertices in graph
__UpperCAmelCase = [0] * n
__UpperCAmelCase = [False] * n
def dfs(UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] ):
__UpperCAmelCase = True
__UpperCAmelCase = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , id_ )
__UpperCAmelCase = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
__UpperCAmelCase = min(low[at] , low[to] )
__UpperCAmelCase = []
for i in range(UpperCamelCase__ ):
if not visited[i]:
dfs(UpperCamelCase__ , -1 , UpperCamelCase__ , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
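# Aside: running the bridge finder on the first sample graph (index 0) should
# yield [(3, 4), (2, 3), (2, 5)] — the leaf edge, the edge leading into it, and
# the single link joining the 5-6-7-8 cycle to the 0-1-2 triangle.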
| 654 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : Tuple ):
"""simple docstring"""
# if the collection is empty, return an empty list
if collection == []:
return []
# get some information about the collection
__UpperCAmelCase = len(UpperCamelCase__ )
__UpperCAmelCase = max(UpperCamelCase__ )
__UpperCAmelCase = min(UpperCamelCase__ )
# create the counting array
__UpperCAmelCase = coll_max + 1 - coll_min
__UpperCAmelCase = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
# sum each position with its predecessors. now, counting_arr[i] tells
# us how many elements <= i there are in the collection
for i in range(1 , UpperCamelCase__ ):
__UpperCAmelCase = counting_arr[i] + counting_arr[i - 1]
# create the output collection
__UpperCAmelCase = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to beginning, updating counting_arr
for i in reversed(range(0 , UpperCamelCase__ ) ):
__UpperCAmelCase = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def lowerCAmelCase ( UpperCamelCase__ : Any ):
"""simple docstring"""
return "".join([chr(UpperCamelCase__ ) for i in counting_sort([ord(UpperCamelCase__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
__lowerCAmelCase : str = input("Enter numbers separated by a comma:\n").strip()
__lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
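# Aside: a short trace of the routine above — counting_sort([4, 1, 3, 1]) counts
# occurrences over the range 1..4 as [2, 0, 1, 1], prefix-sums them to
# [2, 2, 3, 4], then fills the output from the end, giving [1, 1, 3, 4] while
# preserving the relative order of equal elements (the sort is stable).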
| 654 | 1 |
'''simple docstring'''
from manim import *
class A ( UpperCAmelCase ):
def snake_case__ ( self : str ) -> List[str]:
__UpperCAmelCase = Rectangle(height=0.5 , width=0.5 )
__UpperCAmelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
__UpperCAmelCase = Rectangle(height=0.2_5 , width=0.2_5 )
__UpperCAmelCase = [mem.copy() for i in range(6 )]
__UpperCAmelCase = [mem.copy() for i in range(6 )]
__UpperCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
__UpperCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
__UpperCAmelCase = VGroup(__a , __a ).arrange(__a , buff=0 )
__UpperCAmelCase = Text('''CPU''' , font_size=2_4 )
__UpperCAmelCase = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__a )
__UpperCAmelCase = [mem.copy() for i in range(4 )]
__UpperCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
__UpperCAmelCase = Text('''GPU''' , font_size=2_4 )
__UpperCAmelCase = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
gpu.move_to([-1, -1, 0] )
self.add(__a )
__UpperCAmelCase = [mem.copy() for i in range(6 )]
__UpperCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
__UpperCAmelCase = Text('''Model''' , font_size=2_4 )
__UpperCAmelCase = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
model.move_to([3, -1.0, 0] )
self.add(__a )
__UpperCAmelCase = []
__UpperCAmelCase = []
for i, rect in enumerate(__a ):
__UpperCAmelCase = fill.copy().set_fill(__a , opacity=0.8 )
target.move_to(__a )
model_arr.append(__a )
__UpperCAmelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__a )
self.add(*__a , *__a )
__UpperCAmelCase = [meta_mem.copy() for i in range(6 )]
__UpperCAmelCase = [meta_mem.copy() for i in range(6 )]
__UpperCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
__UpperCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
__UpperCAmelCase = VGroup(__a , __a ).arrange(__a , buff=0 )
__UpperCAmelCase = Text('''Disk''' , font_size=2_4 )
__UpperCAmelCase = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
disk.move_to([-4, -1.2_5, 0] )
self.add(__a , __a )
__UpperCAmelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__UpperCAmelCase = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__a , __a )
__UpperCAmelCase = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__a )
__UpperCAmelCase = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__a ) )
__UpperCAmelCase = Square(0.3 )
input.set_fill(__a , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , __a , buff=0.5 )
self.play(Write(__a ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=__a , buff=0.0_2 )
self.play(MoveToTarget(__a ) )
self.play(FadeOut(__a ) )
__UpperCAmelCase = Arrow(start=__a , end=__a , color=__a , buff=0.5 )
a.next_to(model_arr[0].get_left() , __a , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
__UpperCAmelCase = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__a , run_time=3 ) )
__UpperCAmelCase = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.0_2}
self.play(
Write(__a ) , Circumscribe(model_arr[0] , color=__a , **__a ) , Circumscribe(model_cpu_arr[0] , color=__a , **__a ) , Circumscribe(gpu_rect[0] , color=__a , **__a ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
__UpperCAmelCase = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.0_2 , __a , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.0_2 )
__UpperCAmelCase = AnimationGroup(
FadeOut(__a , run_time=0.5 ) , MoveToTarget(__a , run_time=0.5 ) , FadeIn(__a , run_time=0.5 ) , lag_ratio=0.2 )
self.play(__a )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
__UpperCAmelCase = 0.7
self.play(
Circumscribe(model_arr[i] , **__a ) , Circumscribe(cpu_left_col_base[i] , **__a ) , Circumscribe(cpu_left_col_base[i + 1] , color=__a , **__a ) , Circumscribe(gpu_rect[0] , color=__a , **__a ) , Circumscribe(model_arr[i + 1] , color=__a , **__a ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=__a , **__a ) , Circumscribe(cpu_left_col_base[-1] , color=__a , **__a ) , Circumscribe(gpu_rect[0] , color=__a , **__a ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
__UpperCAmelCase = a_c
__UpperCAmelCase = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 )
self.play(
FadeOut(__a ) , FadeOut(__a , run_time=0.5 ) , )
__UpperCAmelCase = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(__a , run_time=3 ) , MoveToTarget(__a ) )
self.wait()
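# Note: the scene above is a fragment of a Manim `Scene.construct` method from the
# accelerate documentation animations; it assumes `from manim import *` plus the
# surrounding class definition and the `cpu_left_col_base` / `model_base` /
# `gpu_rect` mobjects built earlier in the method. Such a scene is typically
# rendered from the command line (file and scene names below are illustrative):
#   manim -pql stage_5.py Stage5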
| 654 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """simple docstring"""
    url = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 654 | 1 |
'''simple docstring'''
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    """simple docstring"""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    """simple docstring"""
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
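# For reference, the high-level API these reader/writer tests exercise (a minimal
# sketch; the table name "dataset" and file name are illustrative):
#   from datasets import Dataset
#   ds = Dataset.from_dict({"col_1": ["0"], "col_2": [0], "col_3": [0.0]})
#   ds.to_sql("dataset", "sqlite:///tmp.sql")               # wraps SqlDatasetWriter
#   ds2 = Dataset.from_sql("dataset", "sqlite:///tmp.sql")  # wraps SqlDatasetReader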
| 654 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """simple docstring"""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While not all processes are completed:
    # a process whose arrival time has passed and which still has remaining
    # execution time is put into ready_process, and the shortest process in
    # ready_process, target_process, is then run to completion.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """simple docstring"""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print("[TEST CASE 01]")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 654 | 1 |
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    """simple docstring"""
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"""Save vocabulary to {pytorch_vocab_dump_path}""")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"""Save dataset to {pytorch_dataset_dump_path}""")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"""Building PyTorch model from configuration: {config}""")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}""")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"""Save configuration file to {os.path.abspath(pytorch_config_dump_path)}""")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
__lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
__lowerCAmelCase : Dict = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
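# Example invocation (all paths and the script file name are illustrative):
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl-pytorch \
#       --tf_checkpoint_path ./transfo_xl_model.ckpt \
#       --transfo_xl_config_file ./transfo_xl_config.json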
| 654 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : List[str] , **__a : Union[str, Any] ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : List[str] , **__a : int ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : int , **__a : List[Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Optional[Any] , **__a : Tuple ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Tuple , **__a : Optional[Any] ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : str , **__a : Tuple ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : List[str] , **__a : int ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : List[str] , **__a : Optional[int] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : Any ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : Dict , **__a : List[str] ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[str] , **__a : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Optional[int] , **__a : Optional[int] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[str] , **__a : List[str] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Optional[int] , **__a : Dict ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Any ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[Any] , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Optional[int] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : str ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Any ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : str , **__a : Tuple ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[str] , **__a : Optional[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Optional[int] , **__a : Union[str, Any] ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Union[str, Any] , **__a : List[str] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Union[str, Any] , **__a : Any ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : int , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : int ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Tuple , **__a : str ) -> Dict:
requires_backends(cls , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[Any] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Any , **UpperCamelCase__ : List[str] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : str , **UpperCamelCase__ : str ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Dict , **UpperCamelCase__ : Dict ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Tuple , **UpperCamelCase__ : int ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Tuple ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : str , **__a : List[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : int , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : str , **__a : List[str] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : List[Any] , **__a : List[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : int ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Optional[Any] , **__a : Tuple ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : str , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : str , **__a : str ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : str ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : int , **__a : Tuple ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Tuple , **__a : str ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Dict ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : str , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : int , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : str , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : int , **__a : List[str] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Union[str, Any] , **__a : Optional[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : List[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Optional[Any] , **__a : Any ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : List[str] , **__a : Dict ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : int , **__a : Dict ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : List[str] , **__a : Union[str, Any] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : Dict ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Tuple , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : str , **__a : Any ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Dict , **__a : Optional[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Optional[Any] , **__a : List[str] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Union[str, Any] , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : Optional[int] , **__a : List[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : List[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Any , **__a : str ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : int , **__a : Optional[Any] ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Tuple , **__a : Optional[int] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : Tuple ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Union[str, Any] , **__a : List[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : List[Any] , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[Any] , *__a : Optional[int] , **__a : int ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : int , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Any , **__a : int ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Union[str, Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Dict ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[Any] , *__a : int , **__a : Optional[int] ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Tuple , **__a : List[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Dict , **__a : Optional[int] ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Any , **__a : Dict ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : Tuple , **__a : Optional[int] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Optional[Any] , **__a : Optional[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : Dict ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Union[str, Any] , **__a : Optional[int] ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Any , **__a : Optional[int] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Union[str, Any] , **__a : List[str] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : List[Any] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Dict , **__a : int ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : Union[str, Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : int ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Optional[Any] , **__a : int ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[Any] , **__a : Optional[int] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Optional[Any] , **__a : Optional[int] ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[int] , **__a : List[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : List[str] , **__a : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Tuple , **__a : Tuple ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : Tuple ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[str] , **__a : int ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Tuple , **__a : Optional[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Any , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : int , **__a : int ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : int , **__a : str ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : List[str] , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : str , **__a : Optional[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : List[Any] , **__a : List[str] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Optional[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[Any] , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : List[Any] , **__a : Optional[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[str] , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : str , **__a : Any ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Dict , **__a : Tuple ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Any , **__a : Optional[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : int , **__a : List[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Tuple ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : int , **__a : Optional[Any] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Optional[int] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[str] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : int , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : List[str] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
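# All of the placeholder classes above follow the same pattern: importing them
# always succeeds, while instantiation or `from_config`/`from_pretrained` routes
# through `requires_backends`, which raises an informative ImportError when torch
# is not installed. A minimal sketch of that helper (assumed, simplified from the
# real `utils.requires_backends`; `_is_available` is hypothetical):
#
#   def requires_backends(obj, backends):
#       name = obj.__name__ if hasattr(obj, "__name__") else type(obj).__name__
#       missing = [b for b in backends if not _is_available(b)]
#       if missing:
#           raise ImportError(f"{name} requires the following backends: {', '.join(missing)}")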
| 654 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
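# With this pattern, `import transformers.models.lilt` is nearly free: the
# `_LazyModule` placed into `sys.modules` resolves names from `_import_structure`
# on first attribute access, while the TYPE_CHECKING branch gives static type
# checkers the real imports. Typical downstream usage (sketch):
#   from transformers import LiltConfig   # resolved lazily on attribute access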
| 654 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa: F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
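# Migration sketch for downstream code (the model id below is illustrative):
#   from diffusers import StableDiffusionImg2ImgPipeline
#   pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")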
| 654 | 1 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
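# Typical round trip with this processor (a sketch; the checkpoint id is
# illustrative and `audio_array` stands for any float waveform sampled at 16 kHz):
#   from transformers import WhisperProcessor
#   processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#   inputs = processor(audio=audio_array, sampling_rate=16000, return_tensors="pt")
#   text = processor.batch_decode(predicted_ids, skip_special_tokens=True)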
| 654 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
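# Each try/except block above gates a group of exports on an optional dependency:
# sentencepiece for the slow tokenizer, tokenizers for the fast one, and torch for
# the models. So, for example, `from transformers import LlamaTokenizerFast` only
# resolves when the `tokenizers` package is installed; otherwise the name is
# simply absent from the lazy module.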
| 654 | 1 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[List[str], None] = None,
        ignore_files: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"""{module_identifier} is not a module.""")
            else:
                result = doctest.testfile(str(Path("..") / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling(self):
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization(self):
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration(self):
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_files_without_model_code(self):
        directory = Path("src/transformers")
        n_identifier = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifier)

    def test_documentation(self):
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
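# For reference, the two doctest entry points used above behave as follows
# (a minimal standalone sketch using only the standard library):
#   import doctest, unittest
#   suite = doctest.DocTestSuite(some_module)       # collects >>> examples from docstrings
#   result = unittest.TextTestRunner().run(suite)   # result.failures lists failing examples
#   file_result = doctest.testfile("README.md", optionflags=doctest.ELLIPSIS)
#   assert file_result.failed == 0                  # testfile returns a (failed, attempted) tuple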
| 654 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    """simple docstring"""
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, """
        f"""16-bits training: {training_args.fp16}"""
    )
    logger.info(f"""Training/evaluation parameters {training_args}""")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"""  {key} = {value}""")
                writer.write(f"""{key} = {value}\n""")
            results.update(result)

    return results
if __name__ == "__main__":
main()
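# Example invocation (file names and model id are illustrative; the CSV files are
# expected to share a header row, with --label_column_id selecting the label column):
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file train.csv --dev_file dev.csv --test_file test.csv \
#       --label_column_id 0 \
#       --output_dir ./model_output --do_train --do_eval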
| 654 | 1 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value = [(True, None)] * 30
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class A ( TestCase ):
def snake_case__ ( self : Union[str, Any] ) -> Dict:
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 1_0 )
# single query
        query = np.zeros(5 , dtype=np.floataa )
        query[1] = 1
        scores , indices = index.search(query )
        self.assertRaises(ValueError , index.search , query.reshape(-1 , 1 ) )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
        # batched queries
        queries = np.eye(5 , dtype=np.floataa )[::-1]
        total_scores , total_indices = index.search_batch(queries )
        self.assertRaises(ValueError , index.search_batch , queries[0] )
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores ) , 0 )
        self.assertListEqual([4, 3, 2, 1, 0] , best_indices )
def snake_case__ ( self : int ) -> int:
import faiss
        index = FaissIndex(string_factory='''Flat''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
        index = FaissIndex(string_factory='''LSH''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
        with self.assertRaises(ValueError ):
            index = FaissIndex(string_factory='''Flat''' , custom_index=faiss.IndexFlat(5 ) )
def snake_case__ ( self : List[Any] ) -> str:
import faiss
        custom_index = faiss.IndexFlat(5 )
        index = FaissIndex(custom_index=custom_index )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def snake_case__ ( self : int ) -> Any:
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            index.save(tmp_file.name )
            index = FaissIndex.load(tmp_file.name )
        os.unlink(tmp_file.name )
        query = np.zeros(5 , dtype=np.floataa )
        query[1] = 1
        scores , indices = index.search(query )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs(mockfs ):
    """simple docstring"""
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.floataa ) )
    index_name = '''index.faiss'''
    path = f"""mock://{index_name}"""
    index.save(path , storage_options=mockfs.storage_options )
    index = FaissIndex.load(path , storage_options=mockfs.storage_options )
    query = np.zeros(5 , dtype=np.floataa )
    query[1] = 1
    scores , indices = index.search(query )
assert scores[0] > 0
assert indices[0] == 1
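# A minimal usage sketch of the Dataset FAISS API exercised above, assuming
# `faiss` and `datasets` are installed (the names below are illustrative only):
#
#     import faiss
#     import numpy as np
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"label": list(range(30))})
#     ds = ds.map(lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True)
#     ds.add_faiss_index(column="vecs", metric_type=faiss.METRIC_INNER_PRODUCT)
#     scores, examples = ds.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
#     assert examples["label"][0] == 29  # largest inner product wins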
@require_elasticsearch
class A ( TestCase ):
def snake_case__ ( self : Union[str, Any] ) -> str:
from elasticsearch import Elasticsearch
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'''acknowledged''': True}
            index = ElasticSearchIndex(es_client=es_client )
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(['''foo''', '''bar''', '''foobar'''] )
            # single query
            query = '''foo'''
            mocked_search.return_value = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
            scores , indices = index.search(query )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # single query with timeout
            query = '''foo'''
            mocked_search.return_value = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
            scores , indices = index.search(query , request_timeout=3_0 )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # batched queries
            queries = ['''foo''', '''bar''', '''foobar''']
            mocked_search.return_value = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
            total_scores , total_indices = index.search_batch(queries )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
            # batched queries with timeout
            queries = ['''foo''', '''bar''', '''foobar''']
            mocked_search.return_value = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
            total_scores , total_indices = index.search_batch(queries , request_timeout=3_0 )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
| 654 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__( self , parent , ):
        self.parent = parent
        self.batch_size = 1_3
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 9_9
        self.hidden_size = 3_2
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 3_7
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_1_2
        self.type_vocab_size = 1_6
        self.type_sequence_label_size = 2
        self.initializer_range = 0.0_2
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertModel(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_distilbert_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertForMaskedLM(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_distilbert_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertForQuestionAnswering(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def create_and_check_distilbert_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_distilbert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def create_and_check_distilbert_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class A ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFDistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=3_7 )
def snake_case__ ( self : List[Any] ) -> Optional[int]:
self.config_tester.run_common_tests()
def snake_case__ ( self : Any ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs )
def snake_case__ ( self : Tuple ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )
def snake_case__ ( self : Union[str, Any] ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )
def snake_case__ ( self : Optional[Any] ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )
def snake_case__ ( self : Any ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )
def snake_case__ ( self : List[str] ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )
@slow
def snake_case__ ( self : Dict ) -> Tuple:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
            model = TFDistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class A ( unittest.TestCase ):
@slow
def snake_case__ ( self : int ) -> Dict:
        model = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 7_6_8]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
                    [0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
                    [0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
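# A minimal inference sketch for the checkpoint exercised above, assuming
# TensorFlow and the distilbert-base-uncased weights are available.
if __name__ == "__main__" and is_tf_available():
    demo_model = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
    demo_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
    demo_hidden = demo_model(demo_ids )[0]
    print(demo_hidden.shape )  # (1, 6, 768): (batch, sequence length, hidden size)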
| 654 | 1 |
'''simple docstring'''
def naive_pattern_search(s : str , pattern : str ):
    """simple docstring"""
    pat_len = len(pattern )
    position = []
    for i in range(len(s ) - pat_len + 1 ):
        match_found = True
        for j in range(pat_len ):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i )
    return position
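# Complexity note: the scan above is the quadratic baseline, performing up to
# O(len(s) * len(pattern)) character comparisons; Knuth-Morris-Pratt or the
# Z-algorithm solve the same search in O(len(s) + len(pattern)).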
if __name__ == "__main__":
assert naive_pattern_search("ABCDEFG", "DE") == [3]
print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
| 654 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
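# Consumer-side behaviour of the lazy pattern above (a sketch, assuming torch
# is available): the names listed in ``_import_structure`` resolve on first
# attribute access, so the heavy modeling module is only imported when needed.
#
#     from transformers import ASTConfig, ASTModel
#     config = ASTConfig()       # triggers the configuration import
#     model = ASTModel(config)   # triggers the torch-backed modeling import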
| 654 | 1 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """simple docstring"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class TvltFeatureExtractionTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=4_0_0 , max_seq_length=2_0_0_0 , spectrogram_length=2_0_4_8 , feature_size=1_2_8 , num_audio_channels=1 , hop_length=5_1_2 , chunk_length=3_0 , sampling_rate=4_4_1_0_0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict( self ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class A ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = TvltFeatureExtractor
    def setUp( self ):
        self.feat_extract_tester = TvltFeatureExtractionTester(self )
def snake_case__ ( self : List[str] ) -> str:
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(__a , '''spectrogram_length''' ) )
self.assertTrue(hasattr(__a , '''feature_size''' ) )
self.assertTrue(hasattr(__a , '''num_audio_channels''' ) )
self.assertTrue(hasattr(__a , '''hop_length''' ) )
self.assertTrue(hasattr(__a , '''chunk_length''' ) )
self.assertTrue(hasattr(__a , '''sampling_rate''' ) )
def snake_case__ ( self : Tuple ) -> List[str]:
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase = feat_extract_first.save_pretrained(__a )[0]
check_json_file_has_correct_format(__a )
__UpperCAmelCase = self.feature_extraction_class.from_pretrained(__a )
__UpperCAmelCase = feat_extract_first.to_dict()
__UpperCAmelCase = feat_extract_second.to_dict()
__UpperCAmelCase = dict_first.pop('''mel_filters''' )
__UpperCAmelCase = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(__a , __a ) )
self.assertEqual(__a , __a )
def snake_case__ ( self : Optional[int] ) -> Dict:
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase = os.path.join(__a , '''feat_extract.json''' )
feat_extract_first.to_json_file(__a )
__UpperCAmelCase = self.feature_extraction_class.from_json_file(__a )
__UpperCAmelCase = feat_extract_first.to_dict()
__UpperCAmelCase = feat_extract_second.to_dict()
__UpperCAmelCase = dict_first.pop('''mel_filters''' )
__UpperCAmelCase = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(__a , __a ) )
self.assertEqual(__a , __a )
def snake_case__ ( self : str ) -> List[Any]:
# Initialize feature_extractor
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__UpperCAmelCase = [np.asarray(__a ) for speech_input in speech_inputs]
# Test not batched input
__UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__UpperCAmelCase = feature_extractor(__a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__UpperCAmelCase = feature_extractor(
__a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=__a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__UpperCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
__UpperCAmelCase = np.asarray(__a )
__UpperCAmelCase = feature_extractor(__a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples( self , num_samples ):
        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
def snake_case__ ( self : Optional[int] ) -> Any:
        input_speech = self._load_datasamples(1 )
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech , return_tensors='''pt''' ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )
        expected_slice = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , expected_slice , atol=1e-4 ) )
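# A standalone usage sketch of the extractor under test, assuming torch,
# datasets, and network access for the dummy LibriSpeech sample.
if __name__ == "__main__" and is_datasets_available() and is_torch_available():
    demo_ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
    demo_audio = demo_ds.sort('''id''' )[0]['''audio''']['''array''']
    demo_extractor = TvltFeatureExtractor()
    print(demo_extractor(demo_audio , return_tensors='''pt''' ).audio_values.shape )  # expect (1, 1, 192, 128)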
| 654 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class A ( PretrainedConfig ):
    model_type = '''bert-generation'''

    def __init__( self , vocab_size=5_0_3_5_8 , hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , intermediate_size=4_0_9_6 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=0 , bos_token_id=2 , eos_token_id=1 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
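# A quick illustrative check of the config above (a sketch; the class keeps the
# placeholder name ``A`` used throughout this file, and the field names follow
# the signature above):
if __name__ == "__main__":
    cfg = A(hidden_size=5_1_2 )
    print(cfg.model_type , cfg.hidden_size , cfg.num_attention_heads )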
| 654 | 1 |
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class A ( unittest.TestCase ):
    def get_file_format( self , seed , shape ):
        return f"""gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy"""
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def get_latents( self , seed=0 , shape=(4, 4, 6_4, 6_4) , fpaa=False ):
        dtype = jnp.bfloataa if fpaa else jnp.floataa
        image = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return image
    def get_unet_model( self , fpaa=False , model_id="CompVis/stable-diffusion-v1-4" ):
        dtype = jnp.bfloataa if fpaa else jnp.floataa
        revision = '''bf16''' if fpaa else None
        model, params = FlaxUNetaDConditionModel.from_pretrained(
            model_id , subfolder='''unet''' , dtype=dtype , revision=revision )
        return model, params
    def get_encoder_hidden_states( self , seed=0 , shape=(4, 7_7, 7_6_8) , fpaa=False ):
        dtype = jnp.bfloataa if fpaa else jnp.floataa
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[1_7, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 1_0_0_0, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
    def snake_case__ ( self , seed , timestep , expected_slice ):
        model, params = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=True )
        latents = self.get_latents(seed , fpaa=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , fpaa=True )
        sample = model.apply(
            {'''params''': params} , latents , jnp.array(timestep , dtype=jnp.intaa ) , encoder_hidden_states=encoder_hidden_states , ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[1_7, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 1_0_0_0, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
] )
    def snake_case__ ( self , seed , timestep , expected_slice ):
        model, params = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=True )
        latents = self.get_latents(seed , shape=(4, 4, 9_6, 9_6) , fpaa=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , shape=(4, 7_7, 1_0_2_4) , fpaa=True )
        sample = model.apply(
            {'''params''': params} , latents , jnp.array(timestep , dtype=jnp.intaa ) , encoder_hidden_states=encoder_hidden_states , ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1e-2 )
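    # For orientation, the call pattern these tests exercise (a sketch, assuming
    # jax/flax and the checkpoint weights are available):
    #
    #     model, params = FlaxUNetaDConditionModel.from_pretrained(
    #         "CompVis/stable-diffusion-v1-4", subfolder="unet", dtype=jnp.bfloat16, revision="bf16"
    #     )
    #     noise_pred = model.apply(
    #         {"params": params}, latents, timesteps, encoder_hidden_states=text_embeddings
    #     ).sample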
| 654 |
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458
# Symbols
ct , x , y , z = symbols("ct x y z")
def beta(velocity: float ) -> float:
    """simple docstring"""
    if velocity > c:
        raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError('''Speed must be greater than or equal to 1!''' )
    return velocity / c
def gamma(velocity: float ) -> float:
    """simple docstring"""
    return 1 / sqrt(1 - beta(velocity ) ** 2 )
def transformation_matrix(velocity: float ) -> np.ndarray:
    """simple docstring"""
    return np.array(
        [
            [gamma(velocity ), -gamma(velocity ) * beta(velocity ), 0, 0],
            [-gamma(velocity ) * beta(velocity ), gamma(velocity ), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ] )
def transform(velocity: float , event: np.ndarray | None = None ) -> np.ndarray:
    """simple docstring"""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z] )  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29_979_245)
print("Example of four vector: ")
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
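    # Sanity check (illustrative): at v = 0.8c the Lorentz factor is
    # gamma = 1 / sqrt(1 - 0.8**2) = 5 / 3.
    assert abs(gamma(0.8 * c) - 5 / 3) < 1e-9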
| 654 | 1 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int] , burst_time: list[int] , no_of_processes: int ) -> list[int]:
    """simple docstring"""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to waiting_time.
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    ready_process = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes ):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i )
        if len(ready_process ) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int] , no_of_processes: int , waiting_time: list[int] ) -> list[int]:
    """simple docstring"""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print("[TEST CASE 01]")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
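    # A second illustrative run: a single process arriving at t=0 never waits
    # and turns around in exactly its own burst time.
    assert calculate_waitingtime([0], [5], 1) == [0]
    assert calculate_turnaroundtime([5], 1, [0]) == [5]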
| 654 |
'''simple docstring'''
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__( self ):
        self.elements = []
        self.set = set()

    def minkey( self ):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float('''inf''' )

    def empty( self ):
        return len(self.elements ) == 0

    def put( self , item , priority ):
        if item not in self.set:
            heapq.heappush(self.elements , (priority, item) )
            self.set.add(item )
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pri, x) )
                (pri, x) = heapq.heappop(self.elements )
            temp.append((priority, item) )
            for pro, xxx in temp:
                heapq.heappush(self.elements , (pro, xxx) )

    def remove_element( self , item ):
        if item in self.set:
            self.set.remove(item )
            temp = []
            (pro, x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pro, x) )
                (pro, x) = heapq.heappop(self.elements )
            for prito, yyy in temp:
                heapq.heappush(self.elements , (prito, yyy) )

    def top_show( self ):
        return self.elements[0][1]

    def get( self ):
        (priority, item) = heapq.heappop(self.elements )
        self.set.remove(item )
        return (priority, item)
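# Quick behavioural sketch of the queue above: `put` inserts or re-prioritizes,
# `get` pops the entry with the smallest key.
if __name__ == "__main__":
    _pq = PriorityQueue()
    _pq.put((0, 0) , 5 )
    _pq.put((1, 1) , 3 )
    _pq.put((0, 0) , 1 )  # existing item: its priority is updated in place
    assert _pq.get() == (1, (0, 0))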
def consistent_heuristic(P: TPos , goal: TPos ):
    """simple docstring"""
    # euclidean distance
    a = np.array(P )
    b = np.array(goal )
    return np.linalg.norm(a - b )
def heuristic_1(P: TPos , goal: TPos ):
    """simple docstring"""
    # integer division by time variable
    return consistent_heuristic(P , goal ) // t
def heuristic_2(p: TPos , goal: TPos ):
    """simple docstring"""
    # manhattan distance
    return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def key(start: TPos , i: int , goal: TPos , g_function: dict[TPos, float] ):
    """simple docstring"""
    ans = g_function[start] + Wa * heuristics[i](start , goal )
    return ans
def do_something(back_pointer , goal , start ):
    """simple docstring"""
    grid = np.chararray((n, n) )
    for i in range(n ):
        for j in range(n ):
            grid[i][j] = '''*'''

    for i in range(n ):
        for j in range(n ):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = '''#'''

    grid[0][(n - 1)] = '''-'''
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = '''-'''
        x = back_pointer[x]
    grid[(n - 1)][0] = '''-'''
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
    x = back_pointer[goal]
    while x != start:
        print(x , end=''' ''' )
        x = back_pointer[x]
    print(x )
sys.exit()
def valid(p: TPos ):
    """simple docstring"""
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s , j , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , ):
    """simple docstring"""
    for itera in range(n_heuristic ):
        open_list[itera].remove_element(s )
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours ) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours )
                back_pointer[neighbours] = -1
                g_function[neighbours] = float('''inf''' )

            if valid(neighbours ) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours , key(neighbours , 0 , goal , g_function ) )
                    if neighbours not in close_list_inad:
                        for var in range(1 , n_heuristic ):
                            if key(neighbours , var , goal , g_function ) <= Wa * key(
                                neighbours , 0 , goal , g_function ):
                                open_list[j].put(
                                    neighbours , key(neighbours , var , goal , g_function ) )
def make_common_ground():
    """simple docstring"""
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
Wa = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent
# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos , goal: TPos , n_heuristic: int ):
    """simple docstring"""
    g_function = {start: 0, goal: float('''inf''' )}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic ):
        open_list.append(PriorityQueue() )
        open_list[i].put(start , key(start , i , goal , g_function ) )

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float('''inf''' ):
        for i in range(1 , n_heuristic ):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= Wa * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float('''inf''' ):
                        do_something(back_pointer , goal , start )
                else:
                    _, get_s = open_list[i].top_show()
                    visited.add(get_s )
                    expand_state(
                        get_s , i , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
                    close_list_inad.append(get_s )
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float('''inf''' ):
                        do_something(back_pointer , goal , start )
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s )
                        expand_state(
                            get_s , 0 , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
                        close_list_anchor.append(get_s )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
        for j in range(n ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 654 | 1 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__lowerCAmelCase : List[str] = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__lowerCAmelCase : str = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
__lowerCAmelCase : int = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
    def _info( self ):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[
'''https://github.com/m-popovic/chrF''',
] , )
    def _compute( self , predictions , references , char_order: int = CHRF.CHAR_ORDER , word_order: int = CHRF.WORD_ORDER , beta: int = CHRF.BETA , lowercase: bool = False , whitespace: bool = False , eps_smoothing: bool = False , ):
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_chrf = CHRF(char_order , word_order , beta , lowercase , whitespace , eps_smoothing )
        output = sb_chrf.corpus_score(predictions , transformed_references )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 654 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
__lowerCAmelCase : List[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__lowerCAmelCase : str = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__lowerCAmelCase : int = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
__lowerCAmelCase : List[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
__lowerCAmelCase : List[str] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
__lowerCAmelCase : Optional[int] = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def camel_case_split(identifier: str ):
    """simple docstring"""
    matches = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , identifier )
    return [m.group(0 ) for m in matches]
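# For example, the regex above splits on lower->Upper boundaries and before the
# last capital of an acronym run:
#     camel_case_split("TFDistilBertModel") == ["TF", "Distil", "Bert", "Model"]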
def get_frameworks_table():
"""simple docstring"""
__UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__UpperCAmelCase = {
config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(UpperCamelCase__ ):
__UpperCAmelCase = None
if _re_tf_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = tf_models
__UpperCAmelCase = _re_tf_models.match(UpperCamelCase__ ).groups()[0]
elif _re_flax_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = flax_models
__UpperCAmelCase = _re_flax_models.match(UpperCamelCase__ ).groups()[0]
elif _re_pt_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = pt_models
__UpperCAmelCase = _re_pt_models.match(UpperCamelCase__ ).groups()[0]
if lookup_dict is not None:
while len(UpperCamelCase__ ) > 0:
if attr_name in model_prefix_to_model_type:
__UpperCAmelCase = True
break
# Try again after removing the last word in the name
__UpperCAmelCase = ''''''.join(camel_case_split(UpperCamelCase__ )[:-1] )
__UpperCAmelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
__UpperCAmelCase = list(UpperCamelCase__ )
all_models.sort()
__UpperCAmelCase = {'''model_type''': all_models}
__UpperCAmelCase = [pt_models[t] for t in all_models]
__UpperCAmelCase = [tf_models[t] for t in all_models]
__UpperCAmelCase = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
__UpperCAmelCase = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
__UpperCAmelCase = '''AutoProcessor'''
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
__UpperCAmelCase = '''AutoTokenizer'''
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
__UpperCAmelCase = '''AutoFeatureExtractor'''
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
__UpperCAmelCase = '''AutoTokenizer'''
__UpperCAmelCase = [processors[t] for t in all_models]
return pd.DataFrame(UpperCamelCase__ )
def update_pipeline_and_auto_class_table(table ):
"""simple docstring"""
__UpperCAmelCase = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
__UpperCAmelCase = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
__UpperCAmelCase = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
# The type of pipeline may not exist in this framework
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
continue
# First extract all model_names
__UpperCAmelCase = []
for name in getattr(UpperCamelCase__ , UpperCamelCase__ ).values():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
model_names.append(UpperCamelCase__ )
else:
model_names.extend(list(UpperCamelCase__ ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ):
"""simple docstring"""
__UpperCAmelCase = get_frameworks_table()
__UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ )
__UpperCAmelCase = hf_hub_download(
'''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=UpperCamelCase__ )
__UpperCAmelCase = Dataset.from_json(UpperCamelCase__ )
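    # Start from the pipeline tags already stored on the Hub, then overlay the tags derived from the current auto mappings.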
__UpperCAmelCase = {
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(UpperCamelCase__ ) )
}
__UpperCAmelCase = update_pipeline_and_auto_class_table(UpperCamelCase__ )
    # Sort the model classes so nondeterministic ordering doesn't create spurious update commits.
__UpperCAmelCase = sorted(table.keys() )
__UpperCAmelCase = pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
__UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(UpperCamelCase__ , '''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(UpperCamelCase__ , '''pipeline_tags.json''' ) )
if commit_sha is not None:
__UpperCAmelCase = (
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
__UpperCAmelCase = '''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' , folder_path=UpperCamelCase__ , repo_type='''dataset''' , token=UpperCamelCase__ , commit_message=UpperCamelCase__ , )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
__UpperCAmelCase = transformers_module.pipelines.SUPPORTED_TASKS
__UpperCAmelCase = []
for key in pipeline_tasks:
if key not in in_table:
__UpperCAmelCase = pipeline_tasks[key]['''pt''']
if isinstance(UpperCamelCase__ , (list, tuple) ):
__UpperCAmelCase = model[0]
__UpperCAmelCase = model.__name__
if model not in in_table.values():
missing.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
__UpperCAmelCase = ''', '''.join(UpperCamelCase__ )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
__lowerCAmelCase : Tuple = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 654 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def lowerCAmelCase ( UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = ArgumentParser(
'''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=UpperCamelCase__ )
__UpperCAmelCase = parser.add_subparsers(help='''datasets-cli command helpers''' )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(UpperCamelCase__ )
EnvironmentCommand.register_subcommand(UpperCamelCase__ )
TestCommand.register_subcommand(UpperCamelCase__ )
RunBeamCommand.register_subcommand(UpperCamelCase__ )
DummyDataCommand.register_subcommand(UpperCamelCase__ )
# Parse args
__UpperCAmelCase , __UpperCAmelCase = parser.parse_known_args()
if not hasattr(UpperCamelCase__ , '''func''' ):
parser.print_help()
exit(1 )
__UpperCAmelCase = parse_unknown_args(UpperCamelCase__ )
# Run
__UpperCAmelCase = args.func(UpperCamelCase__ , **UpperCamelCase__ )
service.run()
if __name__ == "__main__":
main()
| 654 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__lowerCAmelCase : Optional[int] = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class A ( unittest.TestCase ):
def snake_case__ ( self : Any , __a : str , __a : bool , __a : str = None , __a : list = None ) -> Tuple:
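        # Diff every script under examples/by_feature against the given complete example and assert nothing unexpected remains.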
__UpperCAmelCase = None
__UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
__UpperCAmelCase = os.path.abspath('''examples''' )
for item in os.listdir(__a ):
if item not in EXCLUDE_EXAMPLES:
__UpperCAmelCase = os.path.join(__a , __a )
if os.path.isfile(__a ) and ".py" in item_path:
with self.subTest(
tested_script=__a , feature_script=__a , tested_section='''main()''' if parser_only else '''training_function()''' , ):
__UpperCAmelCase = compare_against_test(
os.path.join(__a , __a ) , __a , __a , __a )
__UpperCAmelCase = '''\n'''.join(__a )
if special_strings is not None:
for string in special_strings:
__UpperCAmelCase = diff.replace(__a , '''''' )
self.assertEqual(__a , '''''' )
def snake_case__ ( self : Optional[Any] ) -> str:
self.one_complete_example('''complete_nlp_example.py''' , __a )
self.one_complete_example('''complete_nlp_example.py''' , __a )
def snake_case__ ( self : List[str] ) -> Tuple:
__UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
__UpperCAmelCase = [
''' ''' * 1_6 + '''{\n\n''',
''' ''' * 2_0 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 2_0 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 2_0 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 2_0 + '''"epoch": epoch,\n\n''',
''' ''' * 1_6 + '''},\n\n''',
''' ''' * 1_6 + '''step=epoch,\n''',
''' ''' * 1_2,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a )
self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class A ( UpperCAmelCase ):
a_ = False
@classmethod
def snake_case__ ( cls : Tuple ) -> str:
super().setUpClass()
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
__UpperCAmelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case__ ( cls : Dict ) -> int:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def snake_case__ ( self : Tuple ) -> Dict:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def snake_case__ ( self : str ) -> int:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def snake_case__ ( self : Any ) -> Any:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
self.assertNotIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
def snake_case__ ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
if torch.cuda.is_available():
__UpperCAmelCase = torch.cuda.device_count()
else:
__UpperCAmelCase = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
else:
self.assertIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
@slow
def snake_case__ ( self : Any ) -> Optional[Any]:
__UpperCAmelCase = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
__UpperCAmelCase = re.findall('''({.+})''' , __a )
__UpperCAmelCase = [r for r in results if '''accuracy''' in r][-1]
__UpperCAmelCase = ast.literal_eval(__a )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def snake_case__ ( self : Dict ) -> int:
__UpperCAmelCase = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
__UpperCAmelCase = f"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__a , '''tracking''' ) ) )
def snake_case__ ( self : Optional[int] ) -> List[Any]:
__UpperCAmelCase = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
__UpperCAmelCase = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 654 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Optional[Any] = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 654 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__lowerCAmelCase : Any = ""
__lowerCAmelCase : int = ""
__lowerCAmelCase : Union[str, Any] = ""
__lowerCAmelCase : Any = 1 # (0 is vertical, 1 is horizontal)
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase = get_dataset(UpperCamelCase__ , UpperCamelCase__ )
print('''Processing...''' )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = update_image_and_anno(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for index, image in enumerate(UpperCamelCase__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__UpperCAmelCase = random_chars(3_2 )
__UpperCAmelCase = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
__UpperCAmelCase = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
        cva.imwrite(f"""{file_root}.jpg""" , UpperCamelCase__ , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
print(f"""Success {index+1}/{len(UpperCamelCase__ )} with {file_name}""" )
__UpperCAmelCase = []
for anno in new_annos[index]:
__UpperCAmelCase = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(UpperCamelCase__ )
        with open(f"""{file_root}.txt""" , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase = []
__UpperCAmelCase = []
for label_file in glob.glob(os.path.join(UpperCamelCase__ , '''*.txt''' ) ):
__UpperCAmelCase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(UpperCamelCase__ ) as in_file:
__UpperCAmelCase = in_file.readlines()
__UpperCAmelCase = os.path.join(UpperCamelCase__ , f"""{label_name}.jpg""" )
__UpperCAmelCase = []
for obj_list in obj_lists:
__UpperCAmelCase = obj_list.rstrip('''\n''' ).split(''' ''' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(UpperCamelCase__ )
labels.append(UpperCamelCase__ )
return img_paths, labels
def lowerCAmelCase ( UpperCamelCase__ : list , UpperCamelCase__ : list , UpperCamelCase__ : int = 1 ):
"""simple docstring"""
__UpperCAmelCase = []
__UpperCAmelCase = []
__UpperCAmelCase = []
for idx in range(len(UpperCamelCase__ ) ):
__UpperCAmelCase = []
__UpperCAmelCase = img_list[idx]
path_list.append(UpperCamelCase__ )
__UpperCAmelCase = anno_list[idx]
__UpperCAmelCase = cva.imread(UpperCamelCase__ )
if flip_type == 1:
__UpperCAmelCase = cva.flip(UpperCamelCase__ , UpperCamelCase__ )
for bbox in img_annos:
__UpperCAmelCase = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__UpperCAmelCase = cva.flip(UpperCamelCase__ , UpperCamelCase__ )
for bbox in img_annos:
__UpperCAmelCase = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(UpperCamelCase__ )
new_imgs_list.append(UpperCamelCase__ )
return new_imgs_list, new_annos_lists, path_list
def lowerCAmelCase ( UpperCamelCase__ : int = 3_2 ):
"""simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
__UpperCAmelCase = ascii_lowercase + digits
return "".join(random.choice(UpperCamelCase__ ) for _ in range(UpperCamelCase__ ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 654 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class A ( UpperCAmelCase ):
a_ = '''bert-generation'''
def __init__( self : str , __a : str=5_0_3_5_8 , __a : int=1_0_2_4 , __a : Optional[Any]=2_4 , __a : Any=1_6 , __a : int=4_0_9_6 , __a : Any="gelu" , __a : Union[str, Any]=0.1 , __a : Any=0.1 , __a : Union[str, Any]=5_1_2 , __a : int=0.0_2 , __a : str=1e-12 , __a : List[str]=0 , __a : Optional[int]=2 , __a : Tuple=1 , __a : str="absolute" , __a : Optional[Any]=True , **__a : Tuple , ) -> Any:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = use_cache
| 654 |
'''simple docstring'''
from pathlib import Path
import fire
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = Path(UpperCamelCase__ )
__UpperCAmelCase = Path(UpperCamelCase__ )
dest_dir.mkdir(exist_ok=UpperCamelCase__ )
for path in src_dir.iterdir():
__UpperCAmelCase = [x.rstrip() for x in list(path.open().readlines() )][:n]
__UpperCAmelCase = dest_dir.joinpath(path.name )
print(UpperCamelCase__ )
dest_path.open('''w''' ).write('''\n'''.join(UpperCamelCase__ ) )
if __name__ == "__main__":
fire.Fire(minify)
| 654 | 1 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class A :
@staticmethod
def snake_case__ ( *__a : Dict , **__a : Tuple ) -> int:
pass
@is_pipeline_test
@require_torch
@require_vision
class A ( unittest.TestCase ):
a_ = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def snake_case__ ( self : Optional[int] , __a : Optional[int] , __a : Optional[int] , __a : Optional[Any] ) -> List[str]:
__UpperCAmelCase = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
__UpperCAmelCase = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def snake_case__ ( self : List[str] , __a : Tuple , __a : Any ) -> Dict:
__UpperCAmelCase = vqa_pipeline(__a , top_k=1 )
self.assertEqual(
__a , [
[{'''score''': ANY(__a ), '''answer''': ANY(__a )}],
[{'''score''': ANY(__a ), '''answer''': ANY(__a )}],
] , )
@require_torch
def snake_case__ ( self : Tuple ) -> Union[str, Any]:
__UpperCAmelCase = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
__UpperCAmelCase = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__UpperCAmelCase = '''How many cats are there?'''
__UpperCAmelCase = vqa_pipeline(image=__a , question='''How many cats are there?''' , top_k=2 )
self.assertEqual(
__a , [{'''score''': ANY(__a ), '''answer''': ANY(__a )}, {'''score''': ANY(__a ), '''answer''': ANY(__a )}] )
__UpperCAmelCase = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
__a , [{'''score''': ANY(__a ), '''answer''': ANY(__a )}, {'''score''': ANY(__a ), '''answer''': ANY(__a )}] )
@slow
@require_torch
def snake_case__ ( self : Optional[Any] ) -> List[Any]:
__UpperCAmelCase = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''' )
__UpperCAmelCase = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__UpperCAmelCase = '''How many cats are there?'''
__UpperCAmelCase = vqa_pipeline(image=__a , question=__a , top_k=2 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}] )
__UpperCAmelCase = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}] )
__UpperCAmelCase = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [[{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''' )
def snake_case__ ( self : Tuple ) -> str:
pass
| 654 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
__UpperCAmelCase = f"""Input value of [number={number}] must be an integer"""
raise TypeError(UpperCamelCase__ )
if number < 1:
__UpperCAmelCase = f"""Input value of [number={number}] must be > 0"""
raise ValueError(UpperCamelCase__ )
__UpperCAmelCase = 1
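    # Build the n-th Catalan number via the recurrence C(i) = C(i - 1) * (4 * i - 2) // (i + 1).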
for i in range(1 , UpperCamelCase__ ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 654 | 1 |
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
__lowerCAmelCase : Optional[Any] = (
"This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
warnings.warn(UpperCamelCase__ , UpperCamelCase__ )
requires_backends(UpperCamelCase__ , '''sklearn''' )
return (preds == labels).mean()
def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] ):
"""simple docstring"""
warnings.warn(UpperCamelCase__ , UpperCamelCase__ )
requires_backends(UpperCamelCase__ , '''sklearn''' )
__UpperCAmelCase = simple_accuracy(UpperCamelCase__ , UpperCamelCase__ )
__UpperCAmelCase = fa_score(y_true=UpperCamelCase__ , y_pred=UpperCamelCase__ )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def lowerCAmelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any ):
"""simple docstring"""
warnings.warn(UpperCamelCase__ , UpperCamelCase__ )
requires_backends(UpperCamelCase__ , '''sklearn''' )
__UpperCAmelCase = pearsonr(UpperCamelCase__ , UpperCamelCase__ )[0]
__UpperCAmelCase = spearmanr(UpperCamelCase__ , UpperCamelCase__ )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
warnings.warn(UpperCamelCase__ , UpperCamelCase__ )
requires_backends(UpperCamelCase__ , '''sklearn''' )
assert len(UpperCamelCase__ ) == len(UpperCamelCase__ ), f"""Predictions and labels have mismatched lengths {len(UpperCamelCase__ )} and {len(UpperCamelCase__ )}"""
if task_name == "cola":
return {"mcc": matthews_corrcoef(UpperCamelCase__ , UpperCamelCase__ )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(UpperCamelCase__ , UpperCamelCase__ )}
elif task_name == "mrpc":
return acc_and_fa(UpperCamelCase__ , UpperCamelCase__ )
elif task_name == "sts-b":
return pearson_and_spearman(UpperCamelCase__ , UpperCamelCase__ )
elif task_name == "qqp":
return acc_and_fa(UpperCamelCase__ , UpperCamelCase__ )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(UpperCamelCase__ , UpperCamelCase__ )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(UpperCamelCase__ , UpperCamelCase__ )}
elif task_name == "qnli":
return {"acc": simple_accuracy(UpperCamelCase__ , UpperCamelCase__ )}
elif task_name == "rte":
return {"acc": simple_accuracy(UpperCamelCase__ , UpperCamelCase__ )}
elif task_name == "wnli":
return {"acc": simple_accuracy(UpperCamelCase__ , UpperCamelCase__ )}
elif task_name == "hans":
return {"acc": simple_accuracy(UpperCamelCase__ , UpperCamelCase__ )}
else:
raise KeyError(UpperCamelCase__ )
def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Any ):
"""simple docstring"""
warnings.warn(UpperCamelCase__ , UpperCamelCase__ )
requires_backends(UpperCamelCase__ , '''sklearn''' )
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError(f"""Predictions and labels have mismatched lengths {len(UpperCamelCase__ )} and {len(UpperCamelCase__ )}""" )
if task_name == "xnli":
return {"acc": simple_accuracy(UpperCamelCase__ , UpperCamelCase__ )}
else:
raise KeyError(UpperCamelCase__ )
| 654 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def lowerCAmelCase ( ):
"""simple docstring"""
raise RuntimeError('''CUDA out of memory.''' )
class A ( nn.Module ):
def __init__( self : Optional[Any] ) -> int:
super().__init__()
__UpperCAmelCase = nn.Linear(3 , 4 )
__UpperCAmelCase = nn.BatchNormad(4 )
__UpperCAmelCase = nn.Linear(4 , 5 )
def snake_case__ ( self : List[str] , __a : Optional[int] ) -> Optional[int]:
return self.lineara(self.batchnorm(self.lineara(__a ) ) )
class A ( unittest.TestCase ):
def snake_case__ ( self : Optional[int] ) -> Any:
__UpperCAmelCase = []
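        # Each mocked OOM should make the decorator halve the batch size: 128 -> 64 -> 32 -> 16 -> 8.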
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : Union[str, Any] ):
nonlocal batch_sizes
batch_sizes.append(__a )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(__a , [1_2_8, 6_4, 3_2, 1_6, 8] )
def snake_case__ ( self : str ) -> int:
__UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : str , __a : Optional[int] ):
nonlocal batch_sizes
batch_sizes.append(__a )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
__UpperCAmelCase , __UpperCAmelCase = mock_training_loop_function('''hello''' )
self.assertListEqual(__a , [1_2_8, 6_4, 3_2, 1_6, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def snake_case__ ( self : Any ) -> int:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(__a : Optional[int] ):
pass
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def snake_case__ ( self : Any ) -> List[Any]:
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(__a : Dict ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def snake_case__ ( self : List[Any] ) -> List[str]:
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : str , __a : Union[str, Any] , __a : int ):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(__a ) as cm:
mock_training_loop_function(1_2_8 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(__a : Tuple ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def snake_case__ ( self : Any ) -> List[Any]:
__UpperCAmelCase = torch.cuda.memory_allocated()
__UpperCAmelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , __a )
__UpperCAmelCase = release_memory(__a )
self.assertEqual(torch.cuda.memory_allocated() , __a )
| 654 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Any = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class A ( UpperCAmelCase ):
a_ = '''lxmert'''
a_ = {}
def __init__( self : int , __a : Dict=3_0_5_2_2 , __a : Union[str, Any]=7_6_8 , __a : int=1_2 , __a : Optional[int]=9_5_0_0 , __a : int=1_6_0_0 , __a : List[Any]=4_0_0 , __a : Tuple=3_0_7_2 , __a : Any="gelu" , __a : List[str]=0.1 , __a : Dict=0.1 , __a : Tuple=5_1_2 , __a : str=2 , __a : str=0.0_2 , __a : Tuple=1e-12 , __a : str=9 , __a : List[str]=5 , __a : str=5 , __a : List[str]=2_0_4_8 , __a : List[str]=4 , __a : int=6.6_7 , __a : Any=True , __a : List[Any]=True , __a : List[Any]=True , __a : Optional[int]=True , __a : List[Any]=True , __a : Tuple=True , __a : Optional[Any]=True , **__a : Any , ) -> int:
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = num_qa_labels
__UpperCAmelCase = num_object_labels
__UpperCAmelCase = num_attr_labels
__UpperCAmelCase = l_layers
__UpperCAmelCase = x_layers
__UpperCAmelCase = r_layers
__UpperCAmelCase = visual_feat_dim
__UpperCAmelCase = visual_pos_dim
__UpperCAmelCase = visual_loss_normalizer
__UpperCAmelCase = task_matched
__UpperCAmelCase = task_mask_lm
__UpperCAmelCase = task_obj_predict
__UpperCAmelCase = task_qa
__UpperCAmelCase = visual_obj_loss
__UpperCAmelCase = visual_attr_loss
__UpperCAmelCase = visual_feat_loss
__UpperCAmelCase = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**__a )
| 654 |
'''simple docstring'''
from __future__ import annotations
import math
def lowerCAmelCase ( UpperCamelCase__ : float , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = u
for i in range(1 , UpperCamelCase__ ):
__UpperCAmelCase = temp * (u - i)
return temp
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = int(input('''enter the numbers of values: ''' ) )
__UpperCAmelCase = []
for _ in range(UpperCamelCase__ ):
y.append([] )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
y[i].append(UpperCamelCase__ )
__UpperCAmelCase = 0
print('''enter the values of parameters in a list: ''' )
__UpperCAmelCase = list(map(UpperCamelCase__ , input().split() ) )
print('''enter the values of corresponding parameters: ''' )
for i in range(UpperCamelCase__ ):
__UpperCAmelCase = float(input() )
__UpperCAmelCase = int(input('''enter the value to interpolate: ''' ) )
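    # Normalized offset u = (x - x0) / h, assuming equally spaced sample points.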
__UpperCAmelCase = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , UpperCamelCase__ ):
for j in range(n - i ):
__UpperCAmelCase = y[j + 1][i - 1] - y[j][i - 1]
__UpperCAmelCase = y[0][0]
for i in range(1 , UpperCamelCase__ ):
summ += (ucal(UpperCamelCase__ , UpperCamelCase__ ) * y[0][i]) / math.factorial(UpperCamelCase__ )
print(f"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
| 654 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A ( UpperCAmelCase ):
def __init__( self : Union[str, Any] , __a : TransformeraDModel , __a : AutoencoderKL , __a : KarrasDiffusionSchedulers , __a : Optional[Dict[int, str]] = None , ) -> List[Any]:
super().__init__()
self.register_modules(transformer=__a , vae=__a , scheduler=__a )
        # create an ImageNet label -> id dictionary for easier use
__UpperCAmelCase = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(''',''' ):
__UpperCAmelCase = int(__a )
__UpperCAmelCase = dict(sorted(self.labels.items() ) )
def snake_case__ ( self : Optional[Any] , __a : Union[str, List[str]] ) -> List[int]:
if not isinstance(__a , __a ):
__UpperCAmelCase = list(__a )
for l in label:
if l not in self.labels:
raise ValueError(
f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : List[Any] , __a : List[int] , __a : float = 4.0 , __a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __a : int = 5_0 , __a : Optional[str] = "pil" , __a : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
__UpperCAmelCase = len(__a )
__UpperCAmelCase = self.transformer.config.sample_size
__UpperCAmelCase = self.transformer.config.in_channels
__UpperCAmelCase = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=__a , device=self.device , dtype=self.transformer.dtype , )
__UpperCAmelCase = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
__UpperCAmelCase = torch.tensor(__a , device=self.device ).reshape(-1 )
__UpperCAmelCase = torch.tensor([1_0_0_0] * batch_size , device=self.device )
__UpperCAmelCase = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
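        # Append the null class (id 1000) so a single forward pass yields both conditional and unconditional predictions for classifier-free guidance.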
# set step values
self.scheduler.set_timesteps(__a )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
__UpperCAmelCase = latent_model_input[: len(__a ) // 2]
__UpperCAmelCase = torch.cat([half, half] , dim=0 )
__UpperCAmelCase = self.scheduler.scale_model_input(__a , __a )
__UpperCAmelCase = t
if not torch.is_tensor(__a ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
__UpperCAmelCase = latent_model_input.device.type == '''mps'''
if isinstance(__a , __a ):
__UpperCAmelCase = torch.floataa if is_mps else torch.floataa
else:
__UpperCAmelCase = torch.intaa if is_mps else torch.intaa
__UpperCAmelCase = torch.tensor([timesteps] , dtype=__a , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
__UpperCAmelCase = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__UpperCAmelCase = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
__UpperCAmelCase = self.transformer(
__a , timestep=__a , class_labels=__a ).sample
# perform guidance
if guidance_scale > 1:
__UpperCAmelCase , __UpperCAmelCase = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
__UpperCAmelCase , __UpperCAmelCase = torch.split(__a , len(__a ) // 2 , dim=0 )
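                # Classifier-free guidance: eps = eps_uncond + scale * (eps_cond - eps_uncond).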
__UpperCAmelCase = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
__UpperCAmelCase = torch.cat([half_eps, half_eps] , dim=0 )
__UpperCAmelCase = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
__UpperCAmelCase , __UpperCAmelCase = torch.split(__a , __a , dim=1 )
else:
__UpperCAmelCase = noise_pred
# compute previous image: x_t -> x_t-1
__UpperCAmelCase = self.scheduler.step(__a , __a , __a ).prev_sample
if guidance_scale > 1:
__UpperCAmelCase , __UpperCAmelCase = latent_model_input.chunk(2 , dim=0 )
else:
__UpperCAmelCase = latent_model_input
__UpperCAmelCase = 1 / self.vae.config.scaling_factor * latents
__UpperCAmelCase = self.vae.decode(__a ).sample
__UpperCAmelCase = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__UpperCAmelCase = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__UpperCAmelCase = self.numpy_to_pil(__a )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=__a )
| 654 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger(__name__)
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
__UpperCAmelCase = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""" )
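        # The original checkpoint fuses Q, K and V into a single qkv matrix; carve it into three hidden_size-sized slices.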
__UpperCAmelCase = in_proj_weight[
: encoder_config.hidden_size, :
]
__UpperCAmelCase = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
__UpperCAmelCase = in_proj_weight[
-encoder_config.hidden_size :, :
]
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = dct.pop(UpperCamelCase__ )
__UpperCAmelCase = val
def lowerCAmelCase ( UpperCamelCase__ : Dict ):
"""simple docstring"""
if "handwritten" in checkpoint_url:
__UpperCAmelCase = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCAmelCase = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
__UpperCAmelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert('''RGB''' )
return im
@torch.no_grad()
def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase = ViTConfig(image_size=3_8_4 , qkv_bias=UpperCamelCase__ )
__UpperCAmelCase = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
__UpperCAmelCase = 7_6_8
elif "large" in checkpoint_url:
# use ViT-large encoder
__UpperCAmelCase = 1_0_2_4
__UpperCAmelCase = 4_0_9_6
__UpperCAmelCase = 2_4
__UpperCAmelCase = 1_6
__UpperCAmelCase = 1_0_2_4
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, with no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCAmelCase = False
__UpperCAmelCase = '''relu'''
__UpperCAmelCase = 1_0_2_4
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = False
# load HuggingFace model
__UpperCAmelCase = ViTModel(UpperCamelCase__ , add_pooling_layer=UpperCamelCase__ )
__UpperCAmelCase = TrOCRForCausalLM(UpperCamelCase__ )
__UpperCAmelCase = VisionEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
model.eval()
# load state_dict of original model, rename some keys
__UpperCAmelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='''cpu''' , check_hash=UpperCamelCase__ )['''model''']
__UpperCAmelCase = create_rename_keys(UpperCamelCase__ , UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
__UpperCAmelCase = state_dict.pop(UpperCamelCase__ )
if key.startswith('''decoder''' ) and "output_projection" not in key:
__UpperCAmelCase = val
else:
__UpperCAmelCase = val
# load state dict
model.load_state_dict(UpperCamelCase__ )
# Check outputs on an image
__UpperCAmelCase = ViTImageProcessor(size=encoder_config.image_size )
__UpperCAmelCase = RobertaTokenizer.from_pretrained('''roberta-large''' )
__UpperCAmelCase = TrOCRProcessor(UpperCamelCase__ , UpperCamelCase__ )
__UpperCAmelCase = processor(images=prepare_img(UpperCamelCase__ ) , return_tensors='''pt''' ).pixel_values
# verify logits
__UpperCAmelCase = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
__UpperCAmelCase = model(pixel_values=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ )
__UpperCAmelCase = outputs.logits
__UpperCAmelCase = torch.Size([1, 1, 5_0_2_6_5] )
if "trocr-base-handwritten" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
elif "trocr-large-handwritten" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
elif "trocr-base-printed" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
elif "trocr-large-printed" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :1_0] , UpperCamelCase__ , atol=1E-3 ), "First elements of logits not as expected"
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__lowerCAmelCase : Optional[int] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 654 | 1 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase : Any = logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
__lowerCAmelCase : Optional[int] = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
__lowerCAmelCase : Optional[Any] = {
"gpt-neox-20b": 2_048,
}
class A ( UpperCAmelCase ):
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ['''input_ids''', '''attention_mask''']
def __init__( self : List[str] , __a : str=None , __a : List[str]=None , __a : Optional[int]=None , __a : Tuple="<|endoftext|>" , __a : List[str]="<|endoftext|>" , __a : Dict="<|endoftext|>" , __a : Tuple=False , **__a : Optional[int] , ) -> Dict:
super().__init__(
__a , __a , tokenizer_file=__a , unk_token=__a , bos_token=__a , eos_token=__a , add_prefix_space=__a , **__a , )
__UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __a ) != add_prefix_space:
__UpperCAmelCase = getattr(__a , pre_tok_state.pop('''type''' ) )
__UpperCAmelCase = add_prefix_space
__UpperCAmelCase = pre_tok_class(**__a )
__UpperCAmelCase = add_prefix_space
def snake_case__ ( self : List[str] , __a : str , __a : Optional[str] = None ) -> Tuple[str]:
__UpperCAmelCase = self._tokenizer.model.save(__a , name=__a )
return tuple(__a )
def snake_case__ ( self : Tuple , __a : "Conversation" ) -> List[int]:
__UpperCAmelCase = []
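        # Flatten the conversation into one token stream, appending EOS after every turn and keeping only the most recent model_max_length tokens.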
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__a , add_special_tokens=__a ) + [self.eos_token_id] )
if len(__a ) > self.model_max_length:
__UpperCAmelCase = input_ids[-self.model_max_length :]
return input_ids
| 654 |
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class A ( unittest.TestCase ):
def snake_case__ ( self : List[Any] , __a : List[str] , __a : Optional[Any] ) -> List[Any]:
return f"""gaussian_noise_s={seed}_shape={'_'.join([str(__a ) for s in shape] )}.npy"""
def snake_case__ ( self : Dict ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def snake_case__ ( self : Optional[Any] , __a : Tuple=0 , __a : List[Any]=(4, 4, 6_4, 6_4) , __a : Optional[Any]=False ) -> Tuple:
__UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a )
return image
def snake_case__ ( self : int , __a : Optional[Any]=False , __a : Optional[Any]="CompVis/stable-diffusion-v1-4" ) -> Any:
__UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__UpperCAmelCase = '''bf16''' if fpaa else None
__UpperCAmelCase , __UpperCAmelCase = FlaxUNetaDConditionModel.from_pretrained(
__a , subfolder='''unet''' , dtype=__a , revision=__a )
return model, params
def snake_case__ ( self : str , __a : int=0 , __a : Tuple=(4, 7_7, 7_6_8) , __a : Optional[int]=False ) -> Union[str, Any]:
__UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a )
return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[1_7, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 1_0_0_0, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
def snake_case__ ( self : Tuple , __a : Tuple , __a : str , __a : Optional[Any] ) -> Any:
__UpperCAmelCase , __UpperCAmelCase = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__a )
__UpperCAmelCase = self.get_latents(__a , fpaa=__a )
__UpperCAmelCase = self.get_encoder_hidden_states(__a , fpaa=__a )
__UpperCAmelCase = model.apply(
{'''params''': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample
assert sample.shape == latents.shape
__UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__UpperCAmelCase = jnp.array(__a , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__a , __a , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[1_7, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 1_0_0_0, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
] )
def snake_case__ ( self : Optional[Any] , __a : Optional[int] , __a : Optional[Any] , __a : Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__a )
__UpperCAmelCase = self.get_latents(__a , shape=(4, 4, 9_6, 9_6) , fpaa=__a )
__UpperCAmelCase = self.get_encoder_hidden_states(__a , shape=(4, 7_7, 1_0_2_4) , fpaa=__a )
__UpperCAmelCase = model.apply(
{'''params''': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample
assert sample.shape == latents.shape
__UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__UpperCAmelCase = jnp.array(__a , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__a , __a , atol=1e-2 )
| 654 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def lowerCAmelCase ( UpperCamelCase__ : float , UpperCamelCase__ : float ):
"""simple docstring"""
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 654 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__lowerCAmelCase : Optional[int] = "examples/"
__lowerCAmelCase : Dict = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__lowerCAmelCase : List[str] = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__lowerCAmelCase : int = "README.md"
def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple ):
"""simple docstring"""
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__UpperCAmelCase = f.read()
__UpperCAmelCase , __UpperCAmelCase = REPLACE_PATTERNS[pattern]
__UpperCAmelCase = replace.replace('''VERSION''' , UpperCamelCase__ )
__UpperCAmelCase = re_pattern.sub(UpperCamelCase__ , UpperCamelCase__ )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(UpperCamelCase__ )
def lowerCAmelCase ( UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
for folder, directories, fnames in os.walk(UpperCamelCase__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ , pattern='''examples''' )
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Any=False ):
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not patch:
update_version_in_examples(UpperCamelCase__ )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = '''🤗 Transformers currently provides the following architectures'''
__UpperCAmelCase = '''1. Want to contribute a new model?'''
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__UpperCAmelCase = f.readlines()
# Find the start of the list.
__UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
__UpperCAmelCase = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(UpperCamelCase__ )
def lowerCAmelCase ( ):
"""simple docstring"""
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
__UpperCAmelCase = f.read()
__UpperCAmelCase = REPLACE_PATTERNS['''init'''][0].search(UpperCamelCase__ ).groups()[0]
return packaging.version.parse(UpperCamelCase__ )
def lowerCAmelCase ( UpperCamelCase__ : Any=False ):
"""simple docstring"""
__UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
__UpperCAmelCase = default_version.base_version
elif patch:
__UpperCAmelCase = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
__UpperCAmelCase = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
__UpperCAmelCase = input(f"""Which version are you releasing? [{default_version}]""" )
if len(UpperCamelCase__ ) == 0:
__UpperCAmelCase = default_version
print(f"""Updating version to {version}.""" )
global_version_update(UpperCamelCase__ , patch=UpperCamelCase__ )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = get_version()
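    # Bump the minor version and append .dev0 to open the next development cycle.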
__UpperCAmelCase = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
__UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
__UpperCAmelCase = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(UpperCamelCase__ ) == 0:
__UpperCAmelCase = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(UpperCamelCase__ )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__lowerCAmelCase : Tuple = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 654 | 1 |
'''simple docstring'''
__lowerCAmelCase : Any = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__lowerCAmelCase : Optional[int] = frozenset(["prompt", "negative_prompt"])
__lowerCAmelCase : Tuple = frozenset([])
__lowerCAmelCase : str = frozenset(["image"])
__lowerCAmelCase : int = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
__lowerCAmelCase : Optional[Any] = frozenset(["image"])
__lowerCAmelCase : int = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__lowerCAmelCase : str = frozenset(["prompt", "image", "negative_prompt"])
__lowerCAmelCase : Union[str, Any] = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__lowerCAmelCase : int = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
__lowerCAmelCase : Any = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__lowerCAmelCase : Union[str, Any] = frozenset(["image", "mask_image"])
__lowerCAmelCase : str = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__lowerCAmelCase : int = frozenset(["example_image", "image", "mask_image"])
__lowerCAmelCase : int = frozenset(["class_labels"])
__lowerCAmelCase : List[str] = frozenset(["class_labels"])
__lowerCAmelCase : Optional[Any] = frozenset(["batch_size"])
__lowerCAmelCase : Union[str, Any] = frozenset([])
__lowerCAmelCase : Union[str, Any] = frozenset(["batch_size"])
__lowerCAmelCase : str = frozenset([])
__lowerCAmelCase : Optional[Any] = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__lowerCAmelCase : List[str] = frozenset(["prompt", "negative_prompt"])
__lowerCAmelCase : int = frozenset(["input_tokens"])
__lowerCAmelCase : Union[str, Any] = frozenset(["input_tokens"])
| 654 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : Tuple ):
"""simple docstring"""
# if the collection is empty, returns empty
if collection == []:
return []
# get some information about the collection
__UpperCAmelCase = len(UpperCamelCase__ )
__UpperCAmelCase = max(UpperCamelCase__ )
__UpperCAmelCase = min(UpperCamelCase__ )
# create the counting array
__UpperCAmelCase = coll_max + 1 - coll_min
__UpperCAmelCase = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i + coll_min exist in the collection
for i in range(1 , UpperCamelCase__ ):
__UpperCAmelCase = counting_arr[i] + counting_arr[i - 1]
# create the output collection
__UpperCAmelCase = [0] * coll_len
    # place the elements in the output, iterating from end to beginning so the
    # sort stays stable, and updating counting_arr as each slot is consumed
for i in reversed(range(0 , UpperCamelCase__ ) ):
__UpperCAmelCase = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
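# Added worked example (illustrative trace, mirroring the __main__ demo below):
# for [0, 5, 3, 2, 2] the counts over 0..5 are [1, 0, 2, 1, 0, 1], the prefix
# sums are [1, 1, 3, 4, 4, 5], and the stable output is [0, 2, 2, 3, 5].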
def lowerCAmelCase ( UpperCamelCase__ : Any ):
"""simple docstring"""
return "".join([chr(UpperCamelCase__ ) for i in counting_sort([ord(UpperCamelCase__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
__lowerCAmelCase : str = input("Enter numbers separated by a comma:\n").strip()
__lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
| 654 | 1 |
'''simple docstring'''
class A :
def __init__( self : Optional[Any] ) -> None:
__UpperCAmelCase = {} # Mapping from char to TrieNode
__UpperCAmelCase = False
def snake_case__ ( self : Optional[int] , __a : list[str] ) -> None:
for word in words:
self.insert(__a )
def snake_case__ ( self : List[str] , __a : str ) -> None:
__UpperCAmelCase = self
for char in word:
if char not in curr.nodes:
__UpperCAmelCase = TrieNode()
__UpperCAmelCase = curr.nodes[char]
__UpperCAmelCase = True
def snake_case__ ( self : Optional[int] , __a : str ) -> bool:
__UpperCAmelCase = self
for char in word:
if char not in curr.nodes:
return False
__UpperCAmelCase = curr.nodes[char]
return curr.is_leaf
def snake_case__ ( self : Tuple , __a : str ) -> None:
def _delete(__a : TrieNode , __a : str , __a : int ) -> bool:
if index == len(__a ):
# If word does not exist
if not curr.is_leaf:
return False
__UpperCAmelCase = False
return len(curr.nodes ) == 0
__UpperCAmelCase = word[index]
__UpperCAmelCase = curr.nodes.get(__a )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
__UpperCAmelCase = _delete(__a , __a , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , __a , 0 )
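        # Added note: _delete returns True when a child node has become
        # childless and can be pruned from its parent, which is how empty
        # branches are removed bottom-up after a word is deleted.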
def lowerCAmelCase ( UpperCamelCase__ : TrieNode , UpperCamelCase__ : str ):
"""simple docstring"""
if node.is_leaf:
print(UpperCamelCase__ , end=''' ''' )
for key, value in node.nodes.items():
print_words(UpperCamelCase__ , word + key )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = '''banana bananas bandana band apple all beast'''.split()
__UpperCAmelCase = TrieNode()
root.insert_many(UpperCamelCase__ )
# print_words(root, "")
assert all(root.find(UpperCamelCase__ ) for word in words )
assert root.find('''banana''' )
assert not root.find('''bandanas''' )
assert not root.find('''apps''' )
assert root.find('''apple''' )
assert root.find('''all''' )
root.delete('''all''' )
assert not root.find('''all''' )
root.delete('''banana''' )
assert not root.find('''banana''' )
assert root.find('''bananas''' )
return True
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : bool ):
"""simple docstring"""
print(str(UpperCamelCase__ ) , '''works!''' if passes else '''doesn\'t work :(''' )
def lowerCAmelCase ( ):
"""simple docstring"""
assert test_trie()
def lowerCAmelCase ( ):
"""simple docstring"""
print_results('''Testing trie functionality''' , test_trie() )
if __name__ == "__main__":
main()
| 654 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def lowerCAmelCase ( UpperCamelCase__ : str = "AAPL" ):
"""simple docstring"""
__UpperCAmelCase = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
__UpperCAmelCase = BeautifulSoup(requests.get(UpperCamelCase__ ).text , '''html.parser''' )
__UpperCAmelCase = '''My(6px) Pos(r) smartphone_Mt(6px)'''
return soup.find('''div''' , class_=class_ ).find('''span''' ).text
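# Added caution (not in the original): the CSS class above is tied to one
# specific Yahoo Finance page layout; when the markup changes, `find` returns
# None and callers should be prepared to handle that.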
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 654 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A ( UpperCAmelCase , unittest.TestCase ):
a_ = KandinskyVaaPriorPipeline
a_ = ['''prompt''']
a_ = ['''prompt''', '''negative_prompt''']
a_ = [
'''num_images_per_prompt''',
'''generator''',
'''num_inference_steps''',
'''latents''',
'''negative_prompt''',
'''guidance_scale''',
'''output_type''',
'''return_dict''',
]
a_ = False
@property
def snake_case__ ( self : Union[str, Any] ) -> int:
return 3_2
@property
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
return 3_2
@property
def snake_case__ ( self : Union[str, Any] ) -> Union[str, Any]:
return self.time_input_dim
@property
def snake_case__ ( self : Optional[Any] ) -> List[str]:
return self.time_input_dim * 4
@property
def snake_case__ ( self : Any ) -> Dict:
return 1_0_0
@property
def snake_case__ ( self : str ) -> Any:
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def snake_case__ ( self : List[Any] ) -> Union[str, Any]:
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(__a )
@property
def snake_case__ ( self : Optional[int] ) -> List[Any]:
torch.manual_seed(0 )
__UpperCAmelCase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 1_2,
'''embedding_dim''': self.text_embedder_hidden_size,
'''num_layers''': 1,
}
__UpperCAmelCase = PriorTransformer(**__a )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
__UpperCAmelCase = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def snake_case__ ( self : Optional[Any] ) -> Dict:
torch.manual_seed(0 )
__UpperCAmelCase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
__UpperCAmelCase = CLIPVisionModelWithProjection(__a )
return model
@property
def snake_case__ ( self : Dict ) -> Tuple:
__UpperCAmelCase = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=__a , do_normalize=__a , do_resize=__a , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=2_2_4 , )
return image_processor
def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase = self.dummy_prior
__UpperCAmelCase = self.dummy_image_encoder
__UpperCAmelCase = self.dummy_text_encoder
__UpperCAmelCase = self.dummy_tokenizer
__UpperCAmelCase = self.dummy_image_processor
__UpperCAmelCase = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_0_0_0 , clip_sample=__a , clip_sample_range=1_0.0 , )
__UpperCAmelCase = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''scheduler''': scheduler,
'''image_processor''': image_processor,
}
return components
def snake_case__ ( self : Any , __a : Any , __a : str=0 ) -> Optional[int]:
if str(__a ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(__a )
else:
__UpperCAmelCase = torch.Generator(device=__a ).manual_seed(__a )
__UpperCAmelCase = {
'''prompt''': '''horse''',
'''generator''': generator,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def snake_case__ ( self : Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase = '''cpu'''
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = self.pipeline_class(**__a )
__UpperCAmelCase = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__UpperCAmelCase = pipe(**self.get_dummy_inputs(__a ) )
__UpperCAmelCase = output.image_embeds
__UpperCAmelCase = pipe(
**self.get_dummy_inputs(__a ) , return_dict=__a , )[0]
__UpperCAmelCase = image[0, -1_0:]
__UpperCAmelCase = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
__UpperCAmelCase = np.array(
[-0.0_5_3_2, 1.7_1_2_0, 0.3_6_5_6, -1.0_8_5_2, -0.8_9_4_6, -1.1_7_5_6, 0.4_3_4_8, 0.2_4_8_2, 0.5_1_4_6, -0.1_1_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def snake_case__ ( self : List[Any] ) -> Any:
__UpperCAmelCase = torch_device == '''cpu'''
__UpperCAmelCase = True
__UpperCAmelCase = False
self._test_inference_batch_single_identical(
test_max_difference=__a , relax_max_difference=__a , test_mean_pixel_difference=__a , )
@skip_mps
def snake_case__ ( self : int ) -> List[str]:
__UpperCAmelCase = torch_device == '''cpu'''
__UpperCAmelCase = False
self._test_attention_slicing_forward_pass(
test_max_difference=__a , test_mean_pixel_difference=__a , )
| 654 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def lowerCAmelCase ( UpperCamelCase__ : list[int] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = [0] * no_of_processes
__UpperCAmelCase = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
for i in range(UpperCamelCase__ ):
__UpperCAmelCase = burst_time[i]
__UpperCAmelCase = []
__UpperCAmelCase = 0
__UpperCAmelCase = 0
    # While some processes remain uncompleted:
    # every process whose arrival time has passed and which still has
    # remaining execution time is placed into ready_process, and the
    # shortest process in ready_process (target_process) is executed.
while completed != no_of_processes:
__UpperCAmelCase = []
__UpperCAmelCase = -1
for i in range(UpperCamelCase__ ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
__UpperCAmelCase = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
__UpperCAmelCase = i
total_time += burst_time[target_process]
completed += 1
__UpperCAmelCase = 0
__UpperCAmelCase = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def lowerCAmelCase ( UpperCamelCase__ : list[int] , UpperCamelCase__ : int , UpperCamelCase__ : list[int] ):
"""simple docstring"""
__UpperCAmelCase = [0] * no_of_processes
for i in range(UpperCamelCase__ ):
__UpperCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
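# Added sketch (hypothetical helper, not in the original script): since
# turnaround = waiting + burst for every process, both averages can be taken
# directly from the two lists computed above.
def _average_metrics(waiting: list[int], turnaround: list[int]) -> tuple[float, float]:
    # Mean waiting time and mean turnaround time over all processes.
    return sum(waiting) / len(waiting), sum(turnaround) / len(turnaround)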
if __name__ == "__main__":
print("[TEST CASE 01]")
__lowerCAmelCase : List[Any] = 4
__lowerCAmelCase : List[Any] = [2, 5, 3, 7]
__lowerCAmelCase : Tuple = [0, 0, 0, 0]
__lowerCAmelCase : Optional[int] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__lowerCAmelCase : Dict = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 654 | 1 |
'''simple docstring'''
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class A ( UpperCAmelCase ):
def __init__( self : Dict , __a : str , __a : Dict=1_3 , __a : Any=7 , __a : Tuple=True , __a : List[str]=True , __a : int=False , __a : Optional[Any]=True , __a : str=9_9 , __a : Optional[int]=3_2 , __a : int=5 , __a : str=4 , __a : int=6_4 , __a : Dict="gelu" , __a : Dict=0.1 , __a : List[Any]=0.1 , __a : List[str]=5_1_2 , __a : int=1_6 , __a : Tuple=2 , __a : Optional[Any]=0.0_2 , __a : Tuple=3 , __a : List[Any]=4 , __a : Dict=None , __a : Union[str, Any]=2 , __a : Dict=2 , __a : Dict=2 , __a : Optional[Any]=2 , __a : Dict=4 , __a : Optional[Any]=1 , ) -> str:
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = seq_length
__UpperCAmelCase = is_training
__UpperCAmelCase = use_input_mask
__UpperCAmelCase = use_token_type_ids
__UpperCAmelCase = use_labels
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = type_sequence_label_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = num_labels
__UpperCAmelCase = num_choices
__UpperCAmelCase = scope
__UpperCAmelCase = q_groups
__UpperCAmelCase = k_groups
__UpperCAmelCase = v_groups
__UpperCAmelCase = post_attention_groups
__UpperCAmelCase = intermediate_groups
__UpperCAmelCase = output_groups
def snake_case__ ( self : Tuple ) -> Optional[Any]:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = None
if self.use_input_mask:
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self : Optional[Any] ) -> int:
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def snake_case__ ( self : Dict , __a : List[str] , __a : Optional[Any] , __a : Any , __a : Union[str, Any] , __a : Any , __a : Optional[int] ) -> Optional[Any]:
__UpperCAmelCase = SqueezeBertModel(config=__a )
model.to(__a )
model.eval()
__UpperCAmelCase = model(__a , __a )
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : List[Any] , __a : Any , __a : List[str] , __a : List[Any] , __a : Union[str, Any] , __a : int , __a : Union[str, Any] ) -> str:
__UpperCAmelCase = SqueezeBertForMaskedLM(config=__a )
model.to(__a )
model.eval()
__UpperCAmelCase = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : str , __a : List[str] , __a : Optional[Any] , __a : str , __a : str , __a : Optional[Any] , __a : Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase = SqueezeBertForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
__UpperCAmelCase = model(
__a , attention_mask=__a , start_positions=__a , end_positions=__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self : int , __a : List[Any] , __a : Union[str, Any] , __a : str , __a : Union[str, Any] , __a : Any , __a : Any ) -> List[Any]:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = SqueezeBertForSequenceClassification(__a )
model.to(__a )
model.eval()
__UpperCAmelCase = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : Optional[Any] , __a : Tuple , __a : int , __a : Any , __a : Union[str, Any] , __a : Union[str, Any] , __a : Tuple ) -> Union[str, Any]:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = SqueezeBertForTokenClassification(config=__a )
model.to(__a )
model.eval()
__UpperCAmelCase = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : Any , __a : Union[str, Any] , __a : List[Any] , __a : Any , __a : Any , __a : Any , __a : int ) -> Tuple:
__UpperCAmelCase = self.num_choices
__UpperCAmelCase = SqueezeBertForMultipleChoice(config=__a )
model.to(__a )
model.eval()
__UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase = model(
__a , attention_mask=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__ ( self : Dict ) -> List[str]:
__UpperCAmelCase = self.prepare_config_and_inputs()
((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) = config_and_inputs
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class A ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
a_ = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
a_ = (
{
'''feature-extraction''': SqueezeBertModel,
'''fill-mask''': SqueezeBertForMaskedLM,
'''question-answering''': SqueezeBertForQuestionAnswering,
'''text-classification''': SqueezeBertForSequenceClassification,
'''token-classification''': SqueezeBertForTokenClassification,
'''zero-shot''': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ = False
a_ = True
a_ = False
def snake_case__ ( self : Union[str, Any] ) -> Dict:
__UpperCAmelCase = SqueezeBertModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=__a , dim=3_7 )
def snake_case__ ( self : Union[str, Any] ) -> List[str]:
self.config_tester.run_common_tests()
def snake_case__ ( self : Any ) -> Dict:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*__a )
def snake_case__ ( self : Union[str, Any] ) -> int:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*__a )
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*__a )
def snake_case__ ( self : int ) -> List[str]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*__a )
def snake_case__ ( self : Dict ) -> Union[str, Any]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*__a )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*__a )
@slow
def snake_case__ ( self : Dict ) -> Union[str, Any]:
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase = SqueezeBertModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_sentencepiece
@require_tokenizers
@require_torch
class A ( unittest.TestCase ):
@slow
def snake_case__ ( self : Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
__UpperCAmelCase = torch.tensor([[1, 2_9_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 1_3, 1_5_8_8, 2]] )
__UpperCAmelCase = model(__a )[0]
__UpperCAmelCase = torch.Size((1, 3) )
self.assertEqual(output.shape , __a )
__UpperCAmelCase = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]] )
self.assertTrue(torch.allclose(__a , __a , atol=1e-4 ) )
| 654 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
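# Added note: every placeholder class below fails fast via `requires_backends`
# when torch is absent; in the original diffusers dummy-objects file the two
# classmethods likely correspond to `from_config` and `from_pretrained`
# (their names are placeholders here), so users get an actionable install
# hint at any entry point.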
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : List[str] , **__a : Union[str, Any] ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : List[str] , **__a : int ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : int , **__a : List[Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Optional[Any] , **__a : Tuple ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Tuple , **__a : Optional[Any] ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : str , **__a : Tuple ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : List[str] , **__a : int ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : List[str] , **__a : Optional[int] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : Any ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : Dict , **__a : List[str] ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[str] , **__a : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Optional[int] , **__a : Optional[int] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[str] , **__a : List[str] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Optional[int] , **__a : Dict ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Any ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[Any] , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Optional[int] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : str ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Any ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : str , **__a : Tuple ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[str] , **__a : Optional[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Optional[int] , **__a : Union[str, Any] ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Union[str, Any] , **__a : List[str] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Union[str, Any] , **__a : Any ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : int , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : int ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Tuple , **__a : str ) -> Dict:
requires_backends(cls , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[Any] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Any , **UpperCamelCase__ : List[str] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : str , **UpperCamelCase__ : str ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Dict , **UpperCamelCase__ : Dict ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Tuple , **UpperCamelCase__ : int ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Tuple ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : str , **__a : List[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : int , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : str , **__a : List[str] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : List[Any] , **__a : List[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : int ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Optional[Any] , **__a : Tuple ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : str , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : str , **__a : str ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : str ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : int , **__a : Tuple ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Tuple , **__a : str ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Dict ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : str , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : int , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : str , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : int , **__a : List[str] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Union[str, Any] , **__a : Optional[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : List[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Optional[Any] , **__a : Any ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : List[str] , **__a : Dict ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : int , **__a : Dict ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : List[str] , **__a : Union[str, Any] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : Dict ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Tuple , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : str , **__a : Any ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Dict , **__a : Optional[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Optional[Any] , **__a : List[str] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Union[str, Any] , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : Optional[int] , **__a : List[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : List[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Any , **__a : str ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : int , **__a : Optional[Any] ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Tuple , **__a : Optional[int] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : Tuple ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Union[str, Any] , **__a : List[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : List[Any] , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[Any] , *__a : Optional[int] , **__a : int ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : int , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Any , **__a : int ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Union[str, Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Dict ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[Any] , *__a : int , **__a : Optional[int] ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Tuple , **__a : List[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Dict , **__a : Optional[int] ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Any , **__a : Dict ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : Tuple , **__a : Optional[int] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Optional[Any] , **__a : Optional[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : Dict ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Union[str, Any] , **__a : Optional[int] ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Any , **__a : Optional[int] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Union[str, Any] , **__a : List[str] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : List[Any] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Dict , **__a : int ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : Union[str, Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : int ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Optional[Any] , **__a : int ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[Any] , **__a : Optional[int] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Optional[Any] , **__a : Optional[int] ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[int] , **__a : List[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : List[str] , **__a : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Tuple , **__a : Tuple ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : Tuple ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[str] , **__a : int ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Tuple , **__a : Optional[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Any , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : int , **__a : int ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : int , **__a : str ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : List[str] , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : str , **__a : Optional[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : List[Any] , **__a : List[str] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Optional[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[Any] , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : List[Any] , **__a : Optional[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[str] , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : str , **__a : Any ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Dict , **__a : Tuple ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Any , **__a : Optional[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : int , **__a : List[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Tuple ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : int , **__a : Optional[Any] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Optional[int] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[str] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : int , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : List[str] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
| 654 | 1 |
'''simple docstring'''
from math import sqrt
def lowerCAmelCase ( UpperCamelCase__ : int = 1_0_0_0_0_0_0 ):
"""simple docstring"""
__UpperCAmelCase = 0
__UpperCAmelCase = 0
__UpperCAmelCase = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(UpperCamelCase__ , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
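# Added reasoning note: for a cuboid whose two shortest sides sum to
# sum_shortest_sides and whose longest side is max_cuboid_size, the shortest
# surface path unfolds to sqrt(sum_shortest_sides**2 + max_cuboid_size**2);
# the min/max expression counts the integer splits (a, b) of that sum that
# form a valid cuboid. Per the Project Euler 86 statement, M = 99 yields
# 1975 cuboids and M = 100 is the first to exceed 2000.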
if __name__ == "__main__":
print(F"""{solution() = }""")
| 654 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 654 | 1 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class A :
def __init__( self : Tuple , __a : int | None = None ) -> str:
__UpperCAmelCase = value
__UpperCAmelCase = None # Added in order to delete a node easier
__UpperCAmelCase = None
__UpperCAmelCase = None
def __repr__( self : Optional[Any] ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1 )
class A :
def __init__( self : int , __a : Node | None = None ) -> List[Any]:
__UpperCAmelCase = root
def __str__( self : Any ) -> str:
return str(self.root )
def snake_case__ ( self : List[str] , __a : Node , __a : Node | None ) -> None:
if new_children is not None: # reset its kids
__UpperCAmelCase = node.parent
if node.parent is not None: # reset its parent
if self.is_right(__a ): # If it is the right children
__UpperCAmelCase = new_children
else:
__UpperCAmelCase = new_children
else:
__UpperCAmelCase = new_children
def snake_case__ ( self : List[str] , __a : Node ) -> bool:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def snake_case__ ( self : str ) -> bool:
return self.root is None
def snake_case__ ( self : Optional[int] , __a : List[str] ) -> None:
__UpperCAmelCase = Node(__a ) # create a new Node
if self.empty(): # if Tree is empty
__UpperCAmelCase = new_node # set its root
else: # Tree is not empty
__UpperCAmelCase = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
__UpperCAmelCase = new_node # We insert the new node in a leaf
break
else:
__UpperCAmelCase = parent_node.left
else:
if parent_node.right is None:
__UpperCAmelCase = new_node
break
else:
__UpperCAmelCase = parent_node.right
__UpperCAmelCase = parent_node
def snake_case__ ( self : str , *__a : int ) -> None:
for value in values:
self.__insert(__a )
def snake_case__ ( self : Optional[int] , __a : Dict ) -> Node | None:
if self.empty():
            raise IndexError('''Warning: Tree is empty! Please use another.''' )
else:
__UpperCAmelCase = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
__UpperCAmelCase = node.left if value < node.value else node.right
return node
def snake_case__ ( self : List[Any] , __a : Node | None = None ) -> Node | None:
if node is None:
if self.root is None:
return None
__UpperCAmelCase = self.root
if not self.empty():
while node.right is not None:
__UpperCAmelCase = node.right
return node
def snake_case__ ( self : Any , __a : Node | None = None ) -> Node | None:
if node is None:
__UpperCAmelCase = self.root
if self.root is None:
return None
if not self.empty():
__UpperCAmelCase = self.root
while node.left is not None:
__UpperCAmelCase = node.left
return node
def snake_case__ ( self : Tuple , __a : int ) -> None:
__UpperCAmelCase = self.search(__a ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(__a , __a )
elif node.left is None: # Has only right children
self.__reassign_nodes(__a , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(__a , node.left )
else:
__UpperCAmelCase = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
__UpperCAmelCase = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
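            # Added note: a node with two children is removed by overwriting
            # its value with the maximum of the left subtree (its in-order
            # predecessor) and deleting that predecessor, which preserves the
            # ordering invariant.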
def snake_case__ ( self : Optional[Any] , __a : Node | None ) -> Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def snake_case__ ( self : Any , __a : Optional[int]=None ) -> Any:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def snake_case__ ( self : Any , __a : list , __a : Node | None ) -> None:
if node:
self.inorder(__a , node.left )
arr.append(node.value )
self.inorder(__a , node.right )
def snake_case__ ( self : Any , __a : int , __a : Node ) -> int:
__UpperCAmelCase = []
self.inorder(__a , __a ) # append all values to list using inorder traversal
return arr[k - 1]
def lowerCAmelCase ( UpperCamelCase__ : Node | None ):
"""simple docstring"""
__UpperCAmelCase = []
if curr_node is not None:
__UpperCAmelCase = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = (8, 3, 6, 1, 1_0, 1_4, 1_3, 4, 7)
__UpperCAmelCase = BinarySearchTree()
for i in testlist:
t.insert(UpperCamelCase__ )
# Prints all the elements of the list in order traversal
print(UpperCamelCase__ )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''' , t.get_max().value ) # type: ignore
print('''Min Value: ''' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(UpperCamelCase__ )
print(UpperCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 654 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Optional[Any] = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
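# Added note: each try/except block above registers tokenizer/model symbols
# only when the optional dependency (sentencepiece, tokenizers or torch) can
# be imported, so `import transformers` never hard-fails on a missing extra.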
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 654 | 1 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : float , UpperCamelCase__ : float ):
"""simple docstring"""
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"""{price_plus_tax(100, 0.2_5) = }""")
print(F"""{price_plus_tax(1_2_5.5_0, 0.0_5) = }""")
| 654 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , ):
"""simple docstring"""
__UpperCAmelCase = {}
if train_file is not None:
__UpperCAmelCase = [train_file]
if eval_file is not None:
__UpperCAmelCase = [eval_file]
if test_file is not None:
__UpperCAmelCase = [test_file]
__UpperCAmelCase = datasets.load_dataset('''csv''' , data_files=UpperCamelCase__ )
__UpperCAmelCase = list(ds[list(files.keys() )[0]].features.keys() )
__UpperCAmelCase = features_name.pop(UpperCamelCase__ )
__UpperCAmelCase = list(set(ds[list(files.keys() )[0]][label_name] ) )
__UpperCAmelCase = {label: i for i, label in enumerate(UpperCamelCase__ )}
__UpperCAmelCase = tokenizer.model_input_names
__UpperCAmelCase = {}
if len(UpperCamelCase__ ) == 1:
for k in files.keys():
__UpperCAmelCase = ds[k].map(
lambda UpperCamelCase__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' ) , batched=UpperCamelCase__ , )
elif len(UpperCamelCase__ ) == 2:
for k in files.keys():
__UpperCAmelCase = ds[k].map(
lambda UpperCamelCase__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' , ) , batched=UpperCamelCase__ , )
def gen_train():
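        # Yield (features, label) pairs consumed by tf.data.Dataset.from_generator below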
for ex in transformed_ds[datasets.Split.TRAIN]:
__UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
__UpperCAmelCase = (
tf.data.Dataset.from_generator(
UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__UpperCAmelCase = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__UpperCAmelCase = (
tf.data.Dataset.from_generator(
UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__UpperCAmelCase = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__UpperCAmelCase = (
tf.data.Dataset.from_generator(
UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__UpperCAmelCase = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
__lowerCAmelCase : List[Any] = logging.getLogger(__name__)
@dataclass
class A :
a_ = field(metadata={'''help''': '''Which column contains the label'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the training file'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the development file'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the test file'''} )
a_ = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class A :
a_ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def lowerCAmelCase ( ):
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCamelCase__ ) , labelaid=UpperCamelCase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__UpperCAmelCase = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , )
def compute_metrics(UpperCamelCase__ : EvalPrediction ) -> Dict:
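        # Accuracy: fraction of argmax predictions that match the reference labels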
__UpperCAmelCase = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__UpperCAmelCase = TFTrainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCAmelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__UpperCAmelCase = trainer.evaluate()
__UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(UpperCamelCase__ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(UpperCamelCase__ )
return results
if __name__ == "__main__":
main()
| 654 | 1 |
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def lowerCAmelCase ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str]=1E-1_2 ):
"""simple docstring"""
__UpperCAmelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(UpperCamelCase__ , axis=1 ) , a_min=UpperCamelCase__ ) ).T
__UpperCAmelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(UpperCamelCase__ , axis=1 ) , a_min=UpperCamelCase__ ) ).T
return jnp.matmul(UpperCamelCase__ , norm_emb_a.T )
class A ( nn.Module ):
a_ = 42
a_ = jnp.floataa
def snake_case__ ( self : int ) -> str:
__UpperCAmelCase = FlaxCLIPVisionModule(self.config.vision_config )
__UpperCAmelCase = nn.Dense(self.config.projection_dim , use_bias=__a , dtype=self.dtype )
__UpperCAmelCase = self.param('''concept_embeds''' , jax.nn.initializers.ones , (1_7, self.config.projection_dim) )
__UpperCAmelCase = self.param(
'''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim) )
__UpperCAmelCase = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (1_7,) )
__UpperCAmelCase = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,) )
def __call__( self : Tuple , __a : Any ) -> str:
__UpperCAmelCase = self.vision_model(__a )[1]
__UpperCAmelCase = self.visual_projection(__a )
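        # Cosine similarity of the projected image embedding against the special-care and NSFW concept embeddings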
__UpperCAmelCase = jax_cosine_distance(__a , self.special_care_embeds )
__UpperCAmelCase = jax_cosine_distance(__a , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
__UpperCAmelCase = 0.0
__UpperCAmelCase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
__UpperCAmelCase = jnp.round(__a , 3 )
__UpperCAmelCase = jnp.any(special_scores > 0 , axis=1 , keepdims=__a )
# Use a lower threshold if an image has any special care concept
__UpperCAmelCase = is_special_care * 0.0_1
__UpperCAmelCase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
__UpperCAmelCase = jnp.round(__a , 3 )
__UpperCAmelCase = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class A ( UpperCAmelCase ):
a_ = CLIPConfig
a_ = '''clip_input'''
a_ = FlaxStableDiffusionSafetyCheckerModule
def __init__( self : Optional[int] , __a : CLIPConfig , __a : Optional[Tuple] = None , __a : int = 0 , __a : jnp.dtype = jnp.floataa , __a : bool = True , **__a : Optional[Any] , ) -> Optional[Any]:
if input_shape is None:
__UpperCAmelCase = (1, 2_2_4, 2_2_4, 3)
__UpperCAmelCase = self.module_class(config=__a , dtype=__a , **__a )
super().__init__(__a , __a , input_shape=__a , seed=__a , dtype=__a , _do_init=_do_init )
def snake_case__ ( self : List[Any] , __a : jax.random.KeyArray , __a : Tuple , __a : FrozenDict = None ) -> FrozenDict:
# init input tensor
__UpperCAmelCase = jax.random.normal(__a , __a )
__UpperCAmelCase , __UpperCAmelCase = jax.random.split(__a )
__UpperCAmelCase = {'''params''': params_rng, '''dropout''': dropout_rng}
__UpperCAmelCase = self.module.init(__a , __a )['''params''']
return random_params
def __call__( self : int , __a : List[str] , __a : dict = None , ) -> Dict:
__UpperCAmelCase = jnp.transpose(__a , (0, 2, 3, 1) )
return self.module.apply(
{'''params''': params or self.params} , jnp.array(__a , dtype=jnp.floataa ) , rngs={} , )
| 654 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class A :
def __init__( self : List[Any] , __a : Any , ) -> Dict:
__UpperCAmelCase = parent
__UpperCAmelCase = 1_3
__UpperCAmelCase = 7
__UpperCAmelCase = True
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = True
__UpperCAmelCase = 9_9
__UpperCAmelCase = 3_2
__UpperCAmelCase = 2
__UpperCAmelCase = 4
__UpperCAmelCase = 3_7
__UpperCAmelCase = '''gelu'''
__UpperCAmelCase = 0.1
__UpperCAmelCase = 0.1
__UpperCAmelCase = 5_1_2
__UpperCAmelCase = 1_6
__UpperCAmelCase = 2
__UpperCAmelCase = 0.0_2
__UpperCAmelCase = 3
__UpperCAmelCase = 4
__UpperCAmelCase = None
def snake_case__ ( self : Optional[int] ) -> Dict:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = None
if self.use_input_mask:
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self : Union[str, Any] , __a : List[str] , __a : int , __a : Union[str, Any] , __a : Union[str, Any] , __a : List[Any] , __a : int ) -> Any:
__UpperCAmelCase = TFDistilBertModel(config=__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
__UpperCAmelCase = [input_ids, input_mask]
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : Tuple , __a : List[Any] , __a : int , __a : Tuple , __a : List[Any] , __a : Union[str, Any] , __a : List[Any] ) -> int:
__UpperCAmelCase = TFDistilBertForMaskedLM(config=__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : Optional[int] , __a : Any , __a : Union[str, Any] , __a : Optional[int] , __a : int , __a : Optional[Any] , __a : Optional[int] ) -> Dict:
__UpperCAmelCase = TFDistilBertForQuestionAnswering(config=__a )
__UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self : Any , __a : Optional[Any] , __a : List[str] , __a : Dict , __a : Dict , __a : int , __a : List[Any] ) -> Dict:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = TFDistilBertForSequenceClassification(__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : Union[str, Any] , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[int] , __a : List[str] , __a : Dict ) -> str:
__UpperCAmelCase = self.num_choices
__UpperCAmelCase = TFDistilBertForMultipleChoice(__a )
__UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__ ( self : int , __a : Optional[Any] , __a : int , __a : Tuple , __a : int , __a : Optional[int] , __a : Optional[int] ) -> int:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = TFDistilBertForTokenClassification(__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : str ) -> Any:
__UpperCAmelCase = self.prepare_config_and_inputs()
((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) = config_and_inputs
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class A ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
a_ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
a_ = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ = False
a_ = False
def snake_case__ ( self : Any ) -> Any:
__UpperCAmelCase = TFDistilBertModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=__a , dim=3_7 )
def snake_case__ ( self : List[Any] ) -> Optional[int]:
self.config_tester.run_common_tests()
def snake_case__ ( self : Any ) -> str:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__a )
def snake_case__ ( self : Tuple ) -> Dict:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__a )
def snake_case__ ( self : Union[str, Any] ) -> Any:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__a )
def snake_case__ ( self : Optional[Any] ) -> Dict:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__a )
def snake_case__ ( self : Any ) -> int:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__a )
def snake_case__ ( self : List[str] ) -> List[Any]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__a )
@slow
def snake_case__ ( self : Dict ) -> Tuple:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__UpperCAmelCase = TFDistilBertModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_tf
class A ( unittest.TestCase ):
@slow
def snake_case__ ( self : int ) -> Dict:
__UpperCAmelCase = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
__UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCAmelCase = model(__a )[0]
__UpperCAmelCase = [1, 6, 7_6_8]
self.assertEqual(output.shape , __a )
__UpperCAmelCase = tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-4 )
| 654 | 1 |
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowerCAmelCase ( UpperCamelCase__ : Dict ):
"""simple docstring"""
monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' , set() )
@pytest.fixture
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
class A :
def __init__( self : Tuple , __a : List[str] ) -> Tuple:
__UpperCAmelCase = metric_id
class A :
a_ = [MetricMock(UpperCAmelCase ) for metric_id in ['''accuracy''', '''mse''', '''precision''', '''codeparrot/apps_metric''']]
def snake_case__ ( self : List[str] ) -> Tuple:
return self._metrics
monkeypatch.setattr('''datasets.inspect.huggingface_hub''' , HfhMock() )
@pytest.mark.parametrize(
'''func, args''' , [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] )
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
if "tmp_path" in args:
__UpperCAmelCase = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args )
with pytest.warns(UpperCamelCase__ , match='''https://huggingface.co/docs/evaluate''' ):
func(*UpperCamelCase__ )
| 654 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__lowerCAmelCase : List[Any] = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 654 | 1 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class A ( UpperCAmelCase ):
def __init__( self : Optional[Any] , __a : Union[str, "sqlalchemy.sql.Selectable"] , __a : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , __a : Optional[Features] = None , __a : str = None , __a : bool = False , **__a : Dict , ) -> Tuple:
super().__init__(features=__a , cache_dir=__a , keep_in_memory=__a , **__a )
__UpperCAmelCase = Sql(
cache_dir=__a , features=__a , sql=__a , con=__a , **__a , )
def snake_case__ ( self : Any ) -> Optional[int]:
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
self.builder.download_and_prepare(
download_config=__a , download_mode=__a , verification_mode=__a , base_path=__a , )
# Build dataset for splits
__UpperCAmelCase = self.builder.as_dataset(
split='''train''' , verification_mode=__a , in_memory=self.keep_in_memory )
return dataset
class A :
def __init__( self : List[str] , __a : Dataset , __a : str , __a : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , __a : Optional[int] = None , __a : Optional[int] = None , **__a : Optional[Any] , ) -> Any:
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
__UpperCAmelCase = dataset
__UpperCAmelCase = name
__UpperCAmelCase = con
__UpperCAmelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__UpperCAmelCase = num_proc
__UpperCAmelCase = to_sql_kwargs
def snake_case__ ( self : List[Any] ) -> int:
__UpperCAmelCase = self.to_sql_kwargs.pop('''sql''' , __a )
__UpperCAmelCase = self.to_sql_kwargs.pop('''con''' , __a )
__UpperCAmelCase = self.to_sql_kwargs.pop('''index''' , __a )
__UpperCAmelCase = self._write(index=__a , **self.to_sql_kwargs )
return written
def snake_case__ ( self : str , __a : Optional[Any] ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = args
__UpperCAmelCase = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs
__UpperCAmelCase = query_table(
table=self.dataset.data , key=slice(__a , offset + self.batch_size ) , indices=self.dataset._indices , )
__UpperCAmelCase = batch.to_pandas()
__UpperCAmelCase = df.to_sql(self.name , self.con , index=__a , **__a )
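        # pandas' to_sql may return None depending on the backend, so fall back to the batch length below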
return num_rows or len(__a )
def snake_case__ ( self : str , __a : Optional[int] , **__a : List[str] ) -> int:
__UpperCAmelCase = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
__UpperCAmelCase , __UpperCAmelCase = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , __a , __a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += num_rows
return written
| 654 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class A ( UpperCAmelCase ):
a_ = '''bert-generation'''
def __init__( self : str , __a : str=5_0_3_5_8 , __a : int=1_0_2_4 , __a : Optional[Any]=2_4 , __a : Any=1_6 , __a : int=4_0_9_6 , __a : Any="gelu" , __a : Union[str, Any]=0.1 , __a : Any=0.1 , __a : Union[str, Any]=5_1_2 , __a : int=0.0_2 , __a : str=1e-12 , __a : List[str]=0 , __a : Optional[int]=2 , __a : Tuple=1 , __a : str="absolute" , __a : Optional[Any]=True , **__a : Tuple , ) -> Any:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = use_cache
| 654 | 1 |
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
__lowerCAmelCase : str = 299_792_458
# Symbols
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = symbols("ct x y z")
def lowerCAmelCase ( UpperCamelCase__ : float ):
"""simple docstring"""
if velocity > c:
raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError('''Speed must be greater than or equal to 1!''' )
return velocity / c
def lowerCAmelCase ( UpperCamelCase__ : float ):
"""simple docstring"""
return 1 / sqrt(1 - beta(UpperCamelCase__ ) ** 2 )
def lowerCAmelCase ( UpperCamelCase__ : float ):
"""simple docstring"""
return np.array(
[
[gamma(UpperCamelCase__ ), -gamma(UpperCamelCase__ ) * beta(UpperCamelCase__ ), 0, 0],
[-gamma(UpperCamelCase__ ) * beta(UpperCamelCase__ ), gamma(UpperCamelCase__ ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def lowerCAmelCase ( UpperCamelCase__ : float , UpperCamelCase__ : np.ndarray | None = None ):
"""simple docstring"""
# Ensure event is not empty
if event is None:
__UpperCAmelCase = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(UpperCamelCase__ ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
__lowerCAmelCase : Dict = transform(29_979_245)
print("Example of four vector: ")
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
__lowerCAmelCase : Union[str, Any] = {ct: c, x: 1, y: 1, z: 1}
__lowerCAmelCase : Optional[int] = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
| 654 |
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
__lowerCAmelCase : str = 299_792_458
# Symbols
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = symbols("ct x y z")
def lowerCAmelCase ( UpperCamelCase__ : float ):
"""simple docstring"""
if velocity > c:
raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError('''Speed must be greater than or equal to 1!''' )
return velocity / c
def lowerCAmelCase ( UpperCamelCase__ : float ):
"""simple docstring"""
return 1 / sqrt(1 - beta(UpperCamelCase__ ) ** 2 )
def lowerCAmelCase ( UpperCamelCase__ : float ):
"""simple docstring"""
return np.array(
[
[gamma(UpperCamelCase__ ), -gamma(UpperCamelCase__ ) * beta(UpperCamelCase__ ), 0, 0],
[-gamma(UpperCamelCase__ ) * beta(UpperCamelCase__ ), gamma(UpperCamelCase__ ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def lowerCAmelCase ( UpperCamelCase__ : float , UpperCamelCase__ : np.ndarray | None = None ):
"""simple docstring"""
# Ensure event is not empty
if event is None:
__UpperCAmelCase = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(UpperCamelCase__ ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
__lowerCAmelCase : Dict = transform(29_979_245)
print("Example of four vector: ")
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
__lowerCAmelCase : Union[str, Any] = {ct: c, x: 1, y: 1, z: 1}
__lowerCAmelCase : Optional[int] = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
| 654 | 1 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def lowerCAmelCase ( UpperCamelCase__ : Tuple="ro" , UpperCamelCase__ : Any="en" , UpperCamelCase__ : Dict="wmt16" , UpperCamelCase__ : Dict=None ):
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
__UpperCAmelCase = f"""{src_lang}-{tgt_lang}"""
print(f"""Converting {dataset}-{pair}""" )
__UpperCAmelCase = datasets.load_dataset(UpperCamelCase__ , UpperCamelCase__ )
if save_dir is None:
__UpperCAmelCase = f"""{dataset}-{pair}"""
__UpperCAmelCase = Path(UpperCamelCase__ )
save_dir.mkdir(exist_ok=UpperCamelCase__ )
for split in ds.keys():
print(f"""Splitting {split} with {ds[split].num_rows} records""" )
# to save to val.source, val.target like summary datasets
__UpperCAmelCase = '''val''' if split == '''validation''' else split
__UpperCAmelCase = save_dir.joinpath(f"""{fn}.source""" )
__UpperCAmelCase = save_dir.joinpath(f"""{fn}.target""" )
__UpperCAmelCase = src_path.open('''w+''' )
__UpperCAmelCase = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__UpperCAmelCase = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f"""Saved {dataset} dataset to {save_dir}""" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 654 |
'''simple docstring'''
import heapq
import sys
import numpy as np
__lowerCAmelCase : Any = tuple[int, int]
class A :
def __init__( self : Optional[int] ) -> int:
__UpperCAmelCase = []
__UpperCAmelCase = set()
def snake_case__ ( self : Optional[Any] ) -> List[Any]:
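        # Smallest priority currently in the queue, or +inf if the queue is empty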
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
def snake_case__ ( self : Dict ) -> Optional[int]:
return len(self.elements ) == 0
def snake_case__ ( self : Optional[int] , __a : Optional[Any] , __a : Dict ) -> Optional[Any]:
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(__a )
else:
# update
# print("update", item)
__UpperCAmelCase = []
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def snake_case__ ( self : int , __a : Any ) -> int:
if item in self.set:
self.set.remove(__a )
__UpperCAmelCase = []
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def snake_case__ ( self : List[str] ) -> Dict:
return self.elements[0][1]
def snake_case__ ( self : Any ) -> List[str]:
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
self.set.remove(__a )
return (priority, item)
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# euclidean distance
__UpperCAmelCase = np.array(UpperCamelCase__ )
__UpperCAmelCase = np.array(UpperCamelCase__ )
return np.linalg.norm(a - b )
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# integer division by time variable
return consistent_heuristic(UpperCamelCase__ , UpperCamelCase__ ) // t
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : int , UpperCamelCase__ : TPos , UpperCamelCase__ : dict[TPos, float] ):
"""simple docstring"""
__UpperCAmelCase = g_function[start] + Wa * heuristics[i](UpperCamelCase__ , UpperCamelCase__ )
return ans
def lowerCAmelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase = np.chararray((n, n) )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
__UpperCAmelCase = '''*'''
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
if (j, (n - 1) - i) in blocks:
__UpperCAmelCase = '''#'''
__UpperCAmelCase = '''-'''
__UpperCAmelCase = back_pointer[goal]
while x != start:
((__UpperCAmelCase) , (__UpperCAmelCase)) = x
# print(x)
__UpperCAmelCase = '''-'''
__UpperCAmelCase = back_pointer[x]
__UpperCAmelCase = '''-'''
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
__UpperCAmelCase = back_pointer[goal]
while x != start:
print(UpperCamelCase__ , end=''' ''' )
__UpperCAmelCase = back_pointer[x]
print(UpperCamelCase__ )
sys.exit()
def lowerCAmelCase ( UpperCamelCase__ : TPos ):
"""simple docstring"""
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , ):
"""simple docstring"""
for itera in range(UpperCamelCase__ ):
open_list[itera].remove_element(UpperCamelCase__ )
# print("s", s)
# print("j", j)
((__UpperCAmelCase) , (__UpperCAmelCase)) = s
__UpperCAmelCase = (x - 1, y)
__UpperCAmelCase = (x + 1, y)
__UpperCAmelCase = (x, y + 1)
__UpperCAmelCase = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(UpperCamelCase__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(UpperCamelCase__ )
__UpperCAmelCase = -1
__UpperCAmelCase = float('''inf''' )
if valid(UpperCamelCase__ ) and g_function[neighbours] > g_function[s] + 1:
__UpperCAmelCase = g_function[s] + 1
__UpperCAmelCase = s
if neighbours not in close_list_anchor:
open_list[0].put(UpperCamelCase__ , key(UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ) )
if neighbours not in close_list_inad:
for var in range(1 , UpperCamelCase__ ):
if key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) <= Wa * key(
UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ):
open_list[j].put(
UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
__lowerCAmelCase : Optional[Any] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
__lowerCAmelCase : List[Any] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
__lowerCAmelCase : Dict = make_common_ground()
__lowerCAmelCase : int = blocks_blk
# hyper parameters
__lowerCAmelCase : Dict = 1
__lowerCAmelCase : List[str] = 1
__lowerCAmelCase : Union[str, Any] = 20
__lowerCAmelCase : Any = 3 # one consistent and two other inconsistent
# start and end destination
__lowerCAmelCase : Optional[Any] = (0, 0)
__lowerCAmelCase : Any = (n - 1, n - 1)
__lowerCAmelCase : Optional[int] = 1
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = {start: 0, goal: float('''inf''' )}
__UpperCAmelCase = {start: -1, goal: -1}
__UpperCAmelCase = []
__UpperCAmelCase = set()
for i in range(UpperCamelCase__ ):
open_list.append(PriorityQueue() )
open_list[i].put(UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
__UpperCAmelCase = []
__UpperCAmelCase = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , UpperCamelCase__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
__UpperCAmelCase , __UpperCAmelCase = open_list[i].top_show()
visited.add(UpperCamelCase__ )
expand_state(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
close_list_inad.append(UpperCamelCase__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
__UpperCAmelCase = open_list[0].top_show()
visited.add(UpperCamelCase__ )
expand_state(
UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
close_list_anchor.append(UpperCamelCase__ )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(UpperCamelCase__ ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 654 | 1 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
__lowerCAmelCase : int = pytest.mark.integration
__lowerCAmelCase : Dict = {"comet"}
__lowerCAmelCase : str = importlib.util.find_spec("fairseq") is not None
__lowerCAmelCase : Dict = {"code_eval"}
__lowerCAmelCase : Any = os.name == "nt"
__lowerCAmelCase : Optional[Any] = {"bertscore", "frugalscore", "perplexity"}
__lowerCAmelCase : str = importlib.util.find_spec("transformers") is not None
def lowerCAmelCase ( UpperCamelCase__ : Any ):
"""simple docstring"""
@wraps(UpperCamelCase__ )
def wrapper(self : List[Any] , UpperCamelCase__ : Dict ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest('''"test requires Fairseq"''' )
else:
test_case(self , UpperCamelCase__ )
return wrapper
def lowerCAmelCase ( UpperCamelCase__ : Any ):
"""simple docstring"""
@wraps(UpperCamelCase__ )
def wrapper(self : List[Any] , UpperCamelCase__ : Any ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest('''"test requires transformers"''' )
else:
test_case(self , UpperCamelCase__ )
return wrapper
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
@wraps(UpperCamelCase__ )
def wrapper(self : Union[str, Any] , UpperCamelCase__ : Tuple ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest('''"test not supported on Windows"''' )
else:
test_case(self , UpperCamelCase__ )
return wrapper
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('''./metrics/*/''' )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
@local
class A ( parameterized.TestCase ):
a_ = {}
a_ = None
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''' )
def snake_case__ ( self : Union[str, Any] , __a : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase = '''[...]'''
__UpperCAmelCase = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('''metrics''' , __a ) ).module_path )
__UpperCAmelCase = datasets.load.import_main_class(metric_module.__name__ , dataset=__a )
# check parameters
__UpperCAmelCase = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(__a , metric_module.__name__ ):
with self.use_local_metrics():
try:
__UpperCAmelCase = doctest.testmod(__a , verbose=__a , raise_on_error=__a )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def snake_case__ ( self : Union[str, Any] , __a : Any ) -> List[Any]:
__UpperCAmelCase = '''[...]'''
__UpperCAmelCase = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('''metrics''' , __a ) ).module_path )
# run doctest
with self.use_local_metrics():
__UpperCAmelCase = doctest.testmod(__a , verbose=__a , raise_on_error=__a )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def snake_case__ ( self : Any , __a : List[Any] , __a : Tuple ) -> Dict:
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](__a ):
yield
else:
yield
@contextmanager
def snake_case__ ( self : Optional[Any] ) -> List[str]:
def load_local_metric(__a : str , *__a : str , **__a : Any ):
return load_metric(os.path.join('''metrics''' , __a ) , *__a , **__a )
with patch('''datasets.load_metric''' ) as mock_load_metric:
__UpperCAmelCase = load_local_metric
yield
@classmethod
def snake_case__ ( cls : str , __a : Optional[Any] ) -> Optional[Any]:
def wrapper(__a : str ):
__UpperCAmelCase = contextmanager(__a )
__UpperCAmelCase = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher('''bleurt''' )
def lowerCAmelCase ( UpperCamelCase__ : List[str] ):
"""simple docstring"""
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string('''sv''' , '''''' , '''''' ) # handle pytest cli flags
class A ( UpperCAmelCase ):
def snake_case__ ( self : str , __a : List[Any] ) -> List[str]:
assert len(input_dict['''input_ids'''] ) == 2
return np.array([1.0_3, 1.0_4] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor:
__UpperCAmelCase = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher('''bertscore''' )
def lowerCAmelCase ( UpperCamelCase__ : Tuple ):
"""simple docstring"""
import torch
def bert_cos_score_idf(UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , *UpperCamelCase__ : Any , **UpperCamelCase__ : str ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(UpperCamelCase__ ) )
    # mock get_model, which is supposed to download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch('''bert_score.scorer.get_model''' ), patch(
'''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf:
__UpperCAmelCase = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher('''comet''' )
def lowerCAmelCase ( UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
def load_from_checkpoint(UpperCamelCase__ : Tuple ):
class A :
def snake_case__ ( self : List[Any] , __a : Optional[Any] , *__a : Any , **__a : List[Any] ) -> Optional[Any]:
assert len(__a ) == 2
__UpperCAmelCase = [0.1_9, 0.9_2]
return scores, sum(__a ) / len(__a )
return Model()
    # mock download_model and load_from_checkpoint, which are supposed to download and load a comet model
with patch('''comet.download_model''' ) as mock_download_model:
__UpperCAmelCase = None
with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint:
__UpperCAmelCase = load_from_checkpoint
yield
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = load_metric(os.path.join('''metrics''' , '''seqeval''' ) )
__UpperCAmelCase = '''ERROR'''
__UpperCAmelCase = f"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
with pytest.raises(UpperCamelCase__ , match=re.escape(UpperCamelCase__ ) ):
metric.compute(predictions=[] , references=[] , scheme=UpperCamelCase__ )
| 654 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent that you run this script from the root of the repo with the command
# python utils/update_metadata.py
__lowerCAmelCase : List[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__lowerCAmelCase : str = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__lowerCAmelCase : int = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
__lowerCAmelCase : List[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
__lowerCAmelCase : List[str] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
__lowerCAmelCase : Optional[int] = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , UpperCamelCase__ )
return [m.group(0 ) for m in matches]
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__UpperCAmelCase = {
config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(UpperCamelCase__ ):
__UpperCAmelCase = None
if _re_tf_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = tf_models
__UpperCAmelCase = _re_tf_models.match(UpperCamelCase__ ).groups()[0]
elif _re_flax_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = flax_models
__UpperCAmelCase = _re_flax_models.match(UpperCamelCase__ ).groups()[0]
elif _re_pt_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = pt_models
__UpperCAmelCase = _re_pt_models.match(UpperCamelCase__ ).groups()[0]
if lookup_dict is not None:
while len(UpperCamelCase__ ) > 0:
if attr_name in model_prefix_to_model_type:
__UpperCAmelCase = True
break
# Try again after removing the last word in the name
__UpperCAmelCase = ''''''.join(camel_case_split(UpperCamelCase__ )[:-1] )
__UpperCAmelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
__UpperCAmelCase = list(UpperCamelCase__ )
all_models.sort()
__UpperCAmelCase = {'''model_type''': all_models}
__UpperCAmelCase = [pt_models[t] for t in all_models]
__UpperCAmelCase = [tf_models[t] for t in all_models]
__UpperCAmelCase = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
__UpperCAmelCase = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
__UpperCAmelCase = '''AutoProcessor'''
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
__UpperCAmelCase = '''AutoTokenizer'''
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
__UpperCAmelCase = '''AutoFeatureExtractor'''
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
__UpperCAmelCase = '''AutoTokenizer'''
__UpperCAmelCase = [processors[t] for t in all_models]
return pd.DataFrame(UpperCamelCase__ )
def lowerCAmelCase ( UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
__UpperCAmelCase = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
__UpperCAmelCase = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
# The type of pipeline may not exist in this framework
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
continue
# First extract all model_names
__UpperCAmelCase = []
for name in getattr(UpperCamelCase__ , UpperCamelCase__ ).values():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
model_names.append(UpperCamelCase__ )
else:
model_names.extend(list(UpperCamelCase__ ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ):
"""simple docstring"""
__UpperCAmelCase = get_frameworks_table()
__UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ )
__UpperCAmelCase = hf_hub_download(
'''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=UpperCamelCase__ )
__UpperCAmelCase = Dataset.from_json(UpperCamelCase__ )
__UpperCAmelCase = {
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(UpperCamelCase__ ) )
}
__UpperCAmelCase = update_pipeline_and_auto_class_table(UpperCamelCase__ )
    # Sort the model classes so that nondeterministic ordering does not create spurious update commits.
__UpperCAmelCase = sorted(table.keys() )
__UpperCAmelCase = pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
__UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(UpperCamelCase__ , '''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(UpperCamelCase__ , '''pipeline_tags.json''' ) )
if commit_sha is not None:
__UpperCAmelCase = (
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
__UpperCAmelCase = '''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' , folder_path=UpperCamelCase__ , repo_type='''dataset''' , token=UpperCamelCase__ , commit_message=UpperCamelCase__ , )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
__UpperCAmelCase = transformers_module.pipelines.SUPPORTED_TASKS
__UpperCAmelCase = []
for key in pipeline_tasks:
if key not in in_table:
__UpperCAmelCase = pipeline_tasks[key]['''pt''']
if isinstance(UpperCamelCase__ , (list, tuple) ):
__UpperCAmelCase = model[0]
__UpperCAmelCase = model.__name__
if model not in in_table.values():
missing.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
__UpperCAmelCase = ''', '''.join(UpperCamelCase__ )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
__lowerCAmelCase : Tuple = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
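# Typical invocations (illustrative, based on the arguments defined above):
#   python utils/update_metadata.py --token <hf_token> --commit_sha <sha>  # refresh and push the metadata dataset
#   python utils/update_metadata.py --check-only                           # only verify all pipeline tags are present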
| 654 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class A :
a_ = XGLMConfig
a_ = {}
a_ = '''gelu'''
def __init__( self : List[str] , __a : int , __a : Optional[Any]=1_4 , __a : Optional[int]=7 , __a : Dict=True , __a : Optional[int]=True , __a : Tuple=True , __a : int=9_9 , __a : Optional[int]=3_2 , __a : str=2 , __a : Tuple=4 , __a : Any=3_7 , __a : int="gelu" , __a : Tuple=0.1 , __a : Optional[int]=0.1 , __a : str=5_1_2 , __a : List[str]=0.0_2 , ) -> Optional[Any]:
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = seq_length
__UpperCAmelCase = is_training
__UpperCAmelCase = use_input_mask
__UpperCAmelCase = use_labels
__UpperCAmelCase = vocab_size
__UpperCAmelCase = d_model
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = ffn_dim
__UpperCAmelCase = activation_function
__UpperCAmelCase = activation_dropout
__UpperCAmelCase = attention_dropout
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = initializer_range
__UpperCAmelCase = None
__UpperCAmelCase = 0
__UpperCAmelCase = 2
__UpperCAmelCase = 1
def snake_case__ ( self : Union[str, Any] ) -> Optional[Any]:
return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )
def snake_case__ ( self : Union[str, Any] ) -> List[Any]:
__UpperCAmelCase = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
__UpperCAmelCase = None
if self.use_input_mask:
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = self.get_config()
__UpperCAmelCase = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def snake_case__ ( self : Optional[int] ) -> Tuple:
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__a , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__a , )
def snake_case__ ( self : Any ) -> str:
__UpperCAmelCase = self.prepare_config_and_inputs()
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) = config_and_inputs
__UpperCAmelCase = {
'''input_ids''': input_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_tf
class A ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
a_ = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
a_ = (TFXGLMForCausalLM,) if is_tf_available() else ()
a_ = (
{'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
)
a_ = False
a_ = False
a_ = False
def snake_case__ ( self : Dict ) -> int:
__UpperCAmelCase = TFXGLMModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=__a , n_embd=3_7 )
def snake_case__ ( self : Optional[Any] ) -> List[Any]:
self.config_tester.run_common_tests()
@slow
def snake_case__ ( self : Union[str, Any] ) -> Optional[int]:
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase = TFXGLMModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
def snake_case__ ( self : List[Any] ) -> int:
super().test_resize_token_embeddings()
@require_tf
class A ( unittest.TestCase ):
@slow
def snake_case__ ( self : Dict , __a : Optional[Any]=True ) -> Union[str, Any]:
__UpperCAmelCase = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
__UpperCAmelCase = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__UpperCAmelCase = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1]
# fmt: on
__UpperCAmelCase = model.generate(__a , do_sample=__a , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __a )
@slow
def snake_case__ ( self : Union[str, Any] ) -> Tuple:
__UpperCAmelCase = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
__UpperCAmelCase = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
tf.random.set_seed(0 )
__UpperCAmelCase = tokenizer('''Today is a nice day and''' , return_tensors='''tf''' )
__UpperCAmelCase = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and ensure the same output regardless of the available devices)
with tf.device(''':/CPU:0''' ):
__UpperCAmelCase = model.generate(__a , do_sample=__a , seed=[7, 0] )
__UpperCAmelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=__a )
__UpperCAmelCase = (
'''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
)
self.assertEqual(__a , __a )
@slow
def snake_case__ ( self : List[Any] ) -> Dict:
__UpperCAmelCase = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
__UpperCAmelCase = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
__UpperCAmelCase = '''left'''
# use different length sentences to test batching
__UpperCAmelCase = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When''',
'''Hello, my dog is a little''',
]
__UpperCAmelCase = tokenizer(__a , return_tensors='''tf''' , padding=__a )
__UpperCAmelCase = inputs['''input_ids''']
__UpperCAmelCase = model.generate(input_ids=__a , attention_mask=inputs['''attention_mask'''] , max_new_tokens=1_2 )
__UpperCAmelCase = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
__UpperCAmelCase = model.generate(input_ids=__a , max_new_tokens=1_2 )
__UpperCAmelCase = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
__UpperCAmelCase = model.generate(input_ids=__a , max_new_tokens=1_2 )
__UpperCAmelCase = tokenizer.batch_decode(__a , skip_special_tokens=__a )
__UpperCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__a )
__UpperCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=__a )
__UpperCAmelCase = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
'''a single''',
'''Hello, my dog is a little bit of a shy one, but he is very friendly''',
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , [non_padded_sentence, padded_sentence] )
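        # Why left padding matters in this test: decoder-only LMs continue generating
        # from the last input position, so a right-padded short prompt would end in
        # pad tokens and generation would start after padding instead of real text.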
| 654 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__lowerCAmelCase : Optional[int] = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class A ( unittest.TestCase ):
def snake_case__ ( self : Any , __a : str , __a : bool , __a : str = None , __a : list = None ) -> Tuple:
__UpperCAmelCase = None
__UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
__UpperCAmelCase = os.path.abspath('''examples''' )
for item in os.listdir(__a ):
if item not in EXCLUDE_EXAMPLES:
__UpperCAmelCase = os.path.join(__a , __a )
if os.path.isfile(__a ) and ".py" in item_path:
with self.subTest(
tested_script=__a , feature_script=__a , tested_section='''main()''' if parser_only else '''training_function()''' , ):
__UpperCAmelCase = compare_against_test(
os.path.join(__a , __a ) , __a , __a , __a )
__UpperCAmelCase = '''\n'''.join(__a )
if special_strings is not None:
for string in special_strings:
__UpperCAmelCase = diff.replace(__a , '''''' )
self.assertEqual(__a , '''''' )
def snake_case__ ( self : Optional[Any] ) -> str:
self.one_complete_example('''complete_nlp_example.py''' , __a )
self.one_complete_example('''complete_nlp_example.py''' , __a )
def snake_case__ ( self : List[str] ) -> Tuple:
__UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
__UpperCAmelCase = [
''' ''' * 1_6 + '''{\n\n''',
''' ''' * 2_0 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 2_0 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 2_0 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 2_0 + '''"epoch": epoch,\n\n''',
''' ''' * 1_6 + '''},\n\n''',
''' ''' * 1_6 + '''step=epoch,\n''',
''' ''' * 1_2,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a )
self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class A ( UpperCAmelCase ):
a_ = False
@classmethod
def snake_case__ ( cls : Tuple ) -> str:
super().setUpClass()
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
__UpperCAmelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case__ ( cls : Dict ) -> int:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def snake_case__ ( self : Tuple ) -> Dict:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def snake_case__ ( self : str ) -> int:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def snake_case__ ( self : Any ) -> Any:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
self.assertNotIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
def snake_case__ ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
if torch.cuda.is_available():
__UpperCAmelCase = torch.cuda.device_count()
else:
__UpperCAmelCase = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
else:
self.assertIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
@slow
def snake_case__ ( self : Any ) -> Optional[Any]:
__UpperCAmelCase = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
__UpperCAmelCase = re.findall('''({.+})''' , __a )
__UpperCAmelCase = [r for r in results if '''accuracy''' in r][-1]
__UpperCAmelCase = ast.literal_eval(__a )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def snake_case__ ( self : Dict ) -> int:
__UpperCAmelCase = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
__UpperCAmelCase = f"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__a , '''tracking''' ) ) )
def snake_case__ ( self : Optional[int] ) -> List[Any]:
__UpperCAmelCase = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
__UpperCAmelCase = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
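# For reference, the command assembled from self._launch_args + testargs in the
# tests above expands to something like (illustrative):
#   accelerate launch --config_file <tmpdir>/default_config.yml \
#       examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir <tmpdir>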
| 654 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
class A ( UpperCAmelCase ):
a_ = ['''pixel_values''']
def __init__( self : Optional[Any] , __a : bool = True , __a : Optional[Dict[str, int]] = None , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : bool = True , __a : bool = True , __a : Union[int, float] = 1 / 2_5_5 , __a : Dict[str, int] = None , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , **__a : int , ) -> None:
super().__init__(**__a )
__UpperCAmelCase = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
__UpperCAmelCase = get_size_dict(__a )
__UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
__UpperCAmelCase = get_size_dict(__a , default_to_square=__a , param_name='''crop_size''' )
__UpperCAmelCase = do_resize
__UpperCAmelCase = do_rescale
__UpperCAmelCase = do_normalize
__UpperCAmelCase = do_center_crop
__UpperCAmelCase = crop_size
__UpperCAmelCase = size
__UpperCAmelCase = resample
__UpperCAmelCase = rescale_factor
__UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__UpperCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def snake_case__ ( self : Any , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Union[str, Any] , ) -> np.ndarray:
__UpperCAmelCase = get_size_dict(__a )
if "shortest_edge" in size:
__UpperCAmelCase = get_resize_output_image_size(__a , size=size['''shortest_edge'''] , default_to_square=__a )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
__UpperCAmelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(f"""Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}""" )
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def snake_case__ ( self : List[str] , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : str , ) -> np.ndarray:
__UpperCAmelCase = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(__a , size=(size['''height'''], size['''width''']) , data_format=__a , **__a )
def snake_case__ ( self : Dict , __a : np.ndarray , __a : float , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[Any] ) -> np.ndarray:
return rescale(__a , scale=__a , data_format=__a , **__a )
def snake_case__ ( self : Optional[Any] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : int , ) -> np.ndarray:
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def snake_case__ ( self : int , __a : ImageInput , __a : Optional[bool] = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : int = None , __a : Optional[bool] = None , __a : Optional[float] = None , __a : Optional[bool] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__a : Dict , ) -> BatchFeature:
__UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
__UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
__UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
__UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
__UpperCAmelCase = get_size_dict(__a , param_name='''crop_size''' , default_to_square=__a )
__UpperCAmelCase = resample if resample is not None else self.resample
__UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
__UpperCAmelCase = image_std if image_std is not None else self.image_std
__UpperCAmelCase = size if size is not None else self.size
__UpperCAmelCase = get_size_dict(__a )
if not is_batched(__a ):
__UpperCAmelCase = [images]
if not valid_images(__a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
__UpperCAmelCase = [to_numpy_array(__a ) for image in images]
if do_resize:
__UpperCAmelCase = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
if do_center_crop:
__UpperCAmelCase = [self.center_crop(image=__a , size=__a ) for image in images]
if do_rescale:
__UpperCAmelCase = [self.rescale(image=__a , scale=__a ) for image in images]
if do_normalize:
__UpperCAmelCase = [self.normalize(image=__a , mean=__a , std=__a ) for image in images]
__UpperCAmelCase = [to_channel_dimension_format(__a , __a ) for image in images]
__UpperCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=__a , tensor_type=__a )
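# Minimal usage sketch (illustrative; the concrete image-processor class name
# depends on the model this file belongs to):
#   from PIL import Image
#   processor = ThisImageProcessor(size={"height": 224, "width": 224})
#   batch = processor(images=Image.open("cat.png"), return_tensors="pt")
#   batch["pixel_values"].shape  # -> torch.Size([1, 3, 224, 224])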
| 654 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__lowerCAmelCase : Any = ""
__lowerCAmelCase : int = ""
__lowerCAmelCase : Union[str, Any] = ""
__lowerCAmelCase : Any = 1 # (0 is vertical, 1 is horizontal)
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase = get_dataset(UpperCamelCase__ , UpperCamelCase__ )
print('''Processing...''' )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = update_image_and_anno(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for index, image in enumerate(UpperCamelCase__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__UpperCAmelCase = random_chars(3_2 )
__UpperCAmelCase = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
__UpperCAmelCase = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(f"""/{file_root}.jpg""" , UpperCamelCase__ , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
print(f"""Success {index+1}/{len(UpperCamelCase__ )} with {file_name}""" )
__UpperCAmelCase = []
for anno in new_annos[index]:
__UpperCAmelCase = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(UpperCamelCase__ )
with open(f"""/{file_root}.txt""" , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase = []
__UpperCAmelCase = []
for label_file in glob.glob(os.path.join(UpperCamelCase__ , '''*.txt''' ) ):
__UpperCAmelCase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(UpperCamelCase__ ) as in_file:
__UpperCAmelCase = in_file.readlines()
__UpperCAmelCase = os.path.join(UpperCamelCase__ , f"""{label_name}.jpg""" )
__UpperCAmelCase = []
for obj_list in obj_lists:
__UpperCAmelCase = obj_list.rstrip('''\n''' ).split(''' ''' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(UpperCamelCase__ )
labels.append(UpperCamelCase__ )
return img_paths, labels
def lowerCAmelCase ( UpperCamelCase__ : list , UpperCamelCase__ : list , UpperCamelCase__ : int = 1 ):
"""simple docstring"""
__UpperCAmelCase = []
__UpperCAmelCase = []
__UpperCAmelCase = []
for idx in range(len(UpperCamelCase__ ) ):
__UpperCAmelCase = []
__UpperCAmelCase = img_list[idx]
path_list.append(UpperCamelCase__ )
__UpperCAmelCase = anno_list[idx]
__UpperCAmelCase = cva.imread(UpperCamelCase__ )
if flip_type == 1:
__UpperCAmelCase = cva.flip(UpperCamelCase__ , UpperCamelCase__ )
for bbox in img_annos:
__UpperCAmelCase = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__UpperCAmelCase = cva.flip(UpperCamelCase__ , UpperCamelCase__ )
for bbox in img_annos:
__UpperCAmelCase = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(UpperCamelCase__ )
new_imgs_list.append(UpperCamelCase__ )
return new_imgs_list, new_annos_lists, path_list
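# Worked example of the flip math above: a YOLO box with x_center = 0.25 becomes
# x_center = 1 - 0.25 = 0.75 after a horizontal flip (flip_type == 1); a vertical
# flip (flip_type == 0) mirrors y_center the same way, while widths and heights
# are unchanged because flipping mirrors positions, not extents.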
def lowerCAmelCase ( UpperCamelCase__ : int = 3_2 ):
"""simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
__UpperCAmelCase = ascii_lowercase + digits
return "".join(random.choice(UpperCamelCase__ ) for _ in range(UpperCamelCase__ ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 654 | 1 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__lowerCAmelCase : Optional[int] = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class A ( unittest.TestCase ):
def snake_case__ ( self : Any , __a : str , __a : bool , __a : str = None , __a : list = None ) -> Tuple:
__UpperCAmelCase = None
__UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
__UpperCAmelCase = os.path.abspath('''examples''' )
for item in os.listdir(__a ):
if item not in EXCLUDE_EXAMPLES:
__UpperCAmelCase = os.path.join(__a , __a )
if os.path.isfile(__a ) and ".py" in item_path:
with self.subTest(
tested_script=__a , feature_script=__a , tested_section='''main()''' if parser_only else '''training_function()''' , ):
__UpperCAmelCase = compare_against_test(
os.path.join(__a , __a ) , __a , __a , __a )
__UpperCAmelCase = '''\n'''.join(__a )
if special_strings is not None:
for string in special_strings:
__UpperCAmelCase = diff.replace(__a , '''''' )
self.assertEqual(__a , '''''' )
def snake_case__ ( self : Optional[Any] ) -> str:
self.one_complete_example('''complete_nlp_example.py''' , __a )
self.one_complete_example('''complete_nlp_example.py''' , __a )
def snake_case__ ( self : List[str] ) -> Tuple:
__UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
__UpperCAmelCase = [
''' ''' * 1_6 + '''{\n\n''',
''' ''' * 2_0 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 2_0 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 2_0 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 2_0 + '''"epoch": epoch,\n\n''',
''' ''' * 1_6 + '''},\n\n''',
''' ''' * 1_6 + '''step=epoch,\n''',
''' ''' * 1_2,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a )
self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class A ( UpperCAmelCase ):
a_ = False
@classmethod
def snake_case__ ( cls : Tuple ) -> str:
super().setUpClass()
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
__UpperCAmelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case__ ( cls : Dict ) -> int:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def snake_case__ ( self : Tuple ) -> Dict:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def snake_case__ ( self : str ) -> int:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def snake_case__ ( self : Any ) -> Any:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
self.assertNotIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
def snake_case__ ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
if torch.cuda.is_available():
__UpperCAmelCase = torch.cuda.device_count()
else:
__UpperCAmelCase = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
else:
self.assertIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
@slow
def snake_case__ ( self : Any ) -> Optional[Any]:
__UpperCAmelCase = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
__UpperCAmelCase = re.findall('''({.+})''' , __a )
__UpperCAmelCase = [r for r in results if '''accuracy''' in r][-1]
__UpperCAmelCase = ast.literal_eval(__a )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def snake_case__ ( self : Dict ) -> int:
__UpperCAmelCase = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
__UpperCAmelCase = f"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__a , '''tracking''' ) ) )
def snake_case__ ( self : Optional[int] ) -> List[Any]:
__UpperCAmelCase = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
__UpperCAmelCase = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 654 |
'''simple docstring'''
from pathlib import Path
import fire
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = Path(UpperCamelCase__ )
__UpperCAmelCase = Path(UpperCamelCase__ )
dest_dir.mkdir(exist_ok=UpperCamelCase__ )
for path in src_dir.iterdir():
__UpperCAmelCase = [x.rstrip() for x in list(path.open().readlines() )][:n]
__UpperCAmelCase = dest_dir.joinpath(path.name )
print(UpperCamelCase__ )
dest_path.open('''w''' ).write('''\n'''.join(UpperCamelCase__ ) )
if __name__ == "__main__":
fire.Fire(minify)
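# Example CLI usage via fire (illustrative; assumes the original signature was
# minify(src_dir, dest_dir, n), which is how fire.Fire would expose it):
#   python minify.py data/full data/tiny 100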
| 654 | 1 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
if number < 0:
raise ValueError('''number must not be negative''' )
return number & (number - 1) == 0
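# Bit-level intuition: 8 is 0b1000 and 7 is 0b0111, so 8 & 7 == 0, while
# 6 (0b110) & 5 (0b101) == 0b100 != 0. Note the check also accepts 0,
# since 0 & -1 == 0.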
if __name__ == "__main__":
import doctest
doctest.testmod()
| 654 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
__UpperCAmelCase = f"""Input value of [number={number}] must be an integer"""
raise TypeError(UpperCamelCase__ )
if number < 1:
__UpperCAmelCase = f"""Input value of [number={number}] must be > 0"""
raise ValueError(UpperCamelCase__ )
__UpperCAmelCase = 1
for i in range(1 , UpperCamelCase__ ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
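# The loop applies the Catalan recurrence C(i) = C(i - 1) * (4 * i - 2) // (i + 1),
# which is exact in integer arithmetic. For number = 5 the successive values of
# current_number are 1 -> 1 -> 2 -> 5 -> 14, so the function returns the 4th
# Catalan number.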
if __name__ == "__main__":
import doctest
doctest.testmod()
| 654 | 1 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__lowerCAmelCase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
class A ( UpperCAmelCase , UpperCAmelCase ):
@register_to_config
def __init__( self : str , __a : bool , __a : Optional[int] = None , __a : Optional[int] = None ) -> Any:
super().__init__()
__UpperCAmelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
__UpperCAmelCase = torch.zeros(__a , __a )
else:
__UpperCAmelCase = None
__UpperCAmelCase = torch.nn.Parameter(__a )
class A ( UpperCAmelCase ):
a_ = 42
a_ = 42
a_ = 42
a_ = 42
a_ = 42
a_ = 42
def __init__( self : List[Any] , __a : VQModel , __a : CLIPTextModel , __a : CLIPTokenizer , __a : TransformeraDModel , __a : VQDiffusionScheduler , __a : LearnedClassifierFreeSamplingEmbeddings , ) -> Optional[int]:
super().__init__()
self.register_modules(
vqvae=__a , transformer=__a , text_encoder=__a , tokenizer=__a , scheduler=__a , learned_classifier_free_sampling_embeddings=__a , )
def snake_case__ ( self : List[str] , __a : int , __a : List[Any] , __a : Optional[Any] ) -> int:
__UpperCAmelCase = len(__a ) if isinstance(__a , __a ) else 1
# get prompt text embeddings
__UpperCAmelCase = self.tokenizer(
__a , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
__UpperCAmelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__UpperCAmelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__UpperCAmelCase = text_input_ids[:, : self.tokenizer.model_max_length]
__UpperCAmelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__UpperCAmelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__a )
# duplicate text embeddings for each generation per prompt
__UpperCAmelCase = prompt_embeds.repeat_interleave(__a , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__UpperCAmelCase = self.learned_classifier_free_sampling_embeddings.embeddings
__UpperCAmelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(__a , 1 , 1 )
else:
__UpperCAmelCase = [''''''] * batch_size
__UpperCAmelCase = text_input_ids.shape[-1]
__UpperCAmelCase = self.tokenizer(
__a , padding='''max_length''' , max_length=__a , truncation=__a , return_tensors='''pt''' , )
__UpperCAmelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
__UpperCAmelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__a )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__UpperCAmelCase = negative_prompt_embeds.shape[1]
__UpperCAmelCase = negative_prompt_embeds.repeat(1 , __a , 1 )
__UpperCAmelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __a , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__UpperCAmelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self : Optional[Any] , __a : Union[str, List[str]] , __a : int = 1_0_0 , __a : float = 5.0 , __a : float = 1.0 , __a : int = 1 , __a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[str] = "pil" , __a : bool = True , __a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __a : int = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
if isinstance(__a , __a ):
__UpperCAmelCase = 1
elif isinstance(__a , __a ):
__UpperCAmelCase = len(__a )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__a )}""" )
__UpperCAmelCase = batch_size * num_images_per_prompt
__UpperCAmelCase = guidance_scale > 1.0
__UpperCAmelCase = self._encode_prompt(__a , __a , __a )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__a , __a ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(__a )}.""" )
# get the initial completely masked latents unless the user supplied it
__UpperCAmelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
__UpperCAmelCase = self.transformer.num_vector_embeds - 1
__UpperCAmelCase = torch.full(__a , __a ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
                    '''Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'''
f""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
__UpperCAmelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__a , device=self.device )
__UpperCAmelCase = self.scheduler.timesteps.to(self.device )
__UpperCAmelCase = latents
for i, t in enumerate(self.progress_bar(__a ) ):
# expand the sample if we are doing classifier free guidance
__UpperCAmelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__UpperCAmelCase = self.transformer(__a , encoder_hidden_states=__a , timestep=__a ).sample
if do_classifier_free_guidance:
__UpperCAmelCase , __UpperCAmelCase = model_output.chunk(2 )
__UpperCAmelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(__a , dim=1 , keepdim=__a )
__UpperCAmelCase = self.truncate(__a , __a )
# remove `log(0)`'s (`-inf`s)
__UpperCAmelCase = model_output.clamp(-7_0 )
# compute the previous noisy sample x_t -> x_t-1
__UpperCAmelCase = self.scheduler.step(__a , timestep=__a , sample=__a , generator=__a ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__a , __a , __a )
__UpperCAmelCase = self.vqvae.config.vq_embed_dim
__UpperCAmelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__UpperCAmelCase = self.vqvae.quantize.get_codebook_entry(__a , shape=__a )
__UpperCAmelCase = self.vqvae.decode(__a , force_not_quantize=__a ).sample
__UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
__UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__UpperCAmelCase = self.numpy_to_pil(__a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__a )
def snake_case__ ( self : List[Any] , __a : torch.FloatTensor , __a : float ) -> torch.FloatTensor:
__UpperCAmelCase , __UpperCAmelCase = torch.sort(__a , 1 , descending=__a )
__UpperCAmelCase = torch.exp(__a )
__UpperCAmelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
__UpperCAmelCase = torch.full_like(keep_mask[:, 0:1, :] , __a )
__UpperCAmelCase = torch.cat((all_true, keep_mask) , dim=1 )
__UpperCAmelCase = keep_mask[:, :-1, :]
__UpperCAmelCase = keep_mask.gather(1 , indices.argsort(1 ) )
__UpperCAmelCase = log_p_x_0.clone()
__UpperCAmelCase = -torch.inf # -inf = log(0)
return rv
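# Reading the truncation above (illustrative summary): per latent pixel, the code
# probabilities are sorted, every probability outside the smallest top set whose
# cumulative mass reaches `truncation_rate` is zeroed out (set to log(0) = -inf),
# and the prepended `all_true` column guarantees the single most likely code
# always survives.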
| 654 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def lowerCAmelCase ( ):
"""simple docstring"""
raise RuntimeError('''CUDA out of memory.''' )
class A ( nn.Module ):
def __init__( self : Optional[Any] ) -> int:
super().__init__()
__UpperCAmelCase = nn.Linear(3 , 4 )
__UpperCAmelCase = nn.BatchNormad(4 )
__UpperCAmelCase = nn.Linear(4 , 5 )
def snake_case__ ( self : List[str] , __a : Optional[int] ) -> Optional[int]:
return self.lineara(self.batchnorm(self.lineara(__a ) ) )
class A ( unittest.TestCase ):
def snake_case__ ( self : Optional[int] ) -> Any:
__UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : Union[str, Any] ):
nonlocal batch_sizes
batch_sizes.append(__a )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(__a , [1_2_8, 6_4, 3_2, 1_6, 8] )
def snake_case__ ( self : str ) -> int:
__UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : str , __a : Optional[int] ):
nonlocal batch_sizes
batch_sizes.append(__a )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
__UpperCAmelCase , __UpperCAmelCase = mock_training_loop_function('''hello''' )
self.assertListEqual(__a , [1_2_8, 6_4, 3_2, 1_6, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def snake_case__ ( self : Any ) -> int:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(__a : Optional[int] ):
pass
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def snake_case__ ( self : Any ) -> List[Any]:
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(__a : Dict ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def snake_case__ ( self : List[Any] ) -> List[str]:
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : str , __a : Union[str, Any] , __a : int ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(__a ) as cm:
mock_training_loop_function(1_2_8 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(__a : Tuple ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def snake_case__ ( self : Any ) -> List[Any]:
__UpperCAmelCase = torch.cuda.memory_allocated()
__UpperCAmelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , __a )
__UpperCAmelCase = release_memory(__a )
self.assertEqual(torch.cuda.memory_allocated() , __a )
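# A minimal sketch of the retry-on-OOM pattern exercised above (my simplification,
# not Accelerate's actual implementation):
#
#   def find_executable_batch_size(starting_batch_size=128):
#       def decorator(function):
#           def wrapper(*args, **kwargs):
#               batch_size = starting_batch_size
#               while batch_size > 0:
#                   try:
#                       return function(batch_size, *args, **kwargs)
#                   except RuntimeError as e:
#                       if "out of memory" not in str(e):
#                           raise
#                       batch_size //= 2  # halve and retry: 128 -> 64 -> 32 -> 16 -> 8
#               raise RuntimeError("No executable batch size found, reached zero.")
#           return wrapper
#       return decorator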
| 654 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class A ( UpperCAmelCase ):
def __init__( self : int , __a : int , __a : str ) -> List[str]:
__UpperCAmelCase = params
__UpperCAmelCase = np.array(__a )
__UpperCAmelCase = np.array([len(__a ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : List[str] , __a : Optional[int] ) -> Dict:
return (self.token_ids[index], self.lengths[index])
def __len__( self : int ) -> Union[str, Any]:
return len(self.lengths )
def snake_case__ ( self : Optional[Any] ) -> Union[str, Any]:
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def snake_case__ ( self : Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase = self.params.max_model_input_size
__UpperCAmelCase = self.lengths > max_len
logger.info(f"""Splitting {sum(__a )} too long sequences.""" )
def divide_chunks(__a : List[Any] , __a : List[str] ):
return [l[i : i + n] for i in range(0 , len(__a ) , __a )]
__UpperCAmelCase = []
__UpperCAmelCase = []
if self.params.mlm:
__UpperCAmelCase , __UpperCAmelCase = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token''']
else:
__UpperCAmelCase , __UpperCAmelCase = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token''']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
__UpperCAmelCase = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
__UpperCAmelCase = np.insert(__a , 0 , __a )
if sub_s[-1] != sep_id:
__UpperCAmelCase = np.insert(__a , len(__a ) , __a )
assert len(__a ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__a )
new_tok_ids.extend(__a )
new_lengths.extend([len(__a ) for l in sub_seqs] )
__UpperCAmelCase = np.array(__a )
__UpperCAmelCase = np.array(__a )
def snake_case__ ( self : Any ) -> Dict:
__UpperCAmelCase = len(self )
__UpperCAmelCase = self.lengths > 1_1
__UpperCAmelCase = self.token_ids[indices]
__UpperCAmelCase = self.lengths[indices]
__UpperCAmelCase = len(self )
logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
def snake_case__ ( self : Tuple ) -> Any:
if "unk_token" not in self.params.special_tok_ids:
return
else:
__UpperCAmelCase = self.params.special_tok_ids['''unk_token''']
__UpperCAmelCase = len(self )
__UpperCAmelCase = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
__UpperCAmelCase = (unk_occs / self.lengths) < 0.5
__UpperCAmelCase = self.token_ids[indices]
__UpperCAmelCase = self.lengths[indices]
__UpperCAmelCase = len(self )
logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
def snake_case__ ( self : Union[str, Any] ) -> Optional[Any]:
if not self.params.is_master:
return
logger.info(f"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def snake_case__ ( self : int , __a : Optional[Any] ) -> Dict:
__UpperCAmelCase = [t[0] for t in batch]
__UpperCAmelCase = [t[1] for t in batch]
assert len(__a ) == len(__a )
# Max for paddings
__UpperCAmelCase = max(__a )
# Pad token ids
if self.params.mlm:
__UpperCAmelCase = self.params.special_tok_ids['''pad_token''']
else:
__UpperCAmelCase = self.params.special_tok_ids['''unk_token''']
__UpperCAmelCase = [list(t.astype(__a ) ) + [pad_idx] * (max_seq_len_ - len(__a )) for t in token_ids]
assert len(tk_ ) == len(__a )
assert all(len(__a ) == max_seq_len_ for t in tk_ )
__UpperCAmelCase = torch.tensor(tk_ ) # (bs, max_seq_len_)
__UpperCAmelCase = torch.tensor(__a ) # (bs)
return tk_t, lg_t
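# Illustrative collation example: for a batch holding token-id sequences of lengths
# [5, 3], max_seq_len_ is 5, the shorter sequence is right-padded with pad_idx, and
# the method returns tk_t of shape (2, 5) together with lg_t = tensor([5, 3]).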
| 654 |
'''simple docstring'''
from __future__ import annotations
import math
def lowerCAmelCase ( UpperCamelCase__ : float , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = u
for i in range(1 , UpperCamelCase__ ):
__UpperCAmelCase = temp * (u - i)
return temp
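# ucal(u, p) evaluates the product u * (u - 1) * ... * (u - p + 1), the coefficient
# of the p-th forward difference in Newton's forward interpolation formula:
#     f(x) ~= y0 + u*Δy0 + u(u-1)/2! * Δ²y0 + u(u-1)(u-2)/3! * Δ³y0 + ...
# with u = (x - x0) / h; the division by p! happens at the call site via
# math.factorial.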
def lowerCAmelCase ( ):
"""simple docstring"""
    __UpperCAmelCase = int(input('''enter the number of values: ''' ) )
__UpperCAmelCase = []
for _ in range(UpperCamelCase__ ):
y.append([] )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
y[i].append(UpperCamelCase__ )
__UpperCAmelCase = 0
    print('''enter the x values as a space-separated list: ''' )
__UpperCAmelCase = list(map(UpperCamelCase__ , input().split() ) )
    print('''enter the corresponding y values (one per line): ''' )
for i in range(UpperCamelCase__ ):
__UpperCAmelCase = float(input() )
__UpperCAmelCase = int(input('''enter the value to interpolate: ''' ) )
__UpperCAmelCase = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , UpperCamelCase__ ):
for j in range(n - i ):
__UpperCAmelCase = y[j + 1][i - 1] - y[j][i - 1]
__UpperCAmelCase = y[0][0]
for i in range(1 , UpperCamelCase__ ):
summ += (ucal(UpperCamelCase__ , UpperCamelCase__ ) * y[0][i]) / math.factorial(UpperCamelCase__ )
print(f"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
| 654 | 1 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class A ( UpperCAmelCase ):
a_ = ['''audio_values''', '''audio_mask''']
def __init__( self : int , __a : Dict=2_0_4_8 , __a : List[str]=1 , __a : str=[1_6, 1_6] , __a : str=1_2_8 , __a : Any=4_4_1_0_0 , __a : Dict=8_6 , __a : int=2_0_4_8 , __a : List[Any]=0.0 , **__a : List[Any] , ) -> List[Any]:
super().__init__(
feature_size=__a , sampling_rate=__a , padding_value=__a , **__a , )
__UpperCAmelCase = spectrogram_length
__UpperCAmelCase = num_channels
__UpperCAmelCase = patch_size
__UpperCAmelCase = feature_size // self.patch_size[1]
__UpperCAmelCase = n_fft
__UpperCAmelCase = sampling_rate // hop_length_to_sampling_rate
__UpperCAmelCase = sampling_rate
__UpperCAmelCase = padding_value
__UpperCAmelCase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__a , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=__a , norm='''slaney''' , mel_scale='''slaney''' , ).T
def snake_case__ ( self : Optional[Any] , __a : np.array ) -> np.ndarray:
__UpperCAmelCase = spectrogram(
__a , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=8_0.0 , )
__UpperCAmelCase = log_spec[:, :-1]
__UpperCAmelCase = log_spec - 2_0.0
__UpperCAmelCase = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
return log_spec
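    # Normalization above in numbers: a bin at +20 dB maps to (20 - 20) / 40 + 1 = 1.0
    # and a bin at -60 dB maps to (-60 - 20) / 40 + 1 = -1.0 after clipping, so the
    # returned log-mel spectrogram lies in [-1, 1].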
def __call__( self : Union[str, Any] , __a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __a : Optional[Union[str, TensorType]] = None , __a : Optional[bool] = True , __a : Optional[int] = None , __a : bool = False , __a : bool = False , **__a : Dict , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'''This feature extractor is set to support sampling rate'''
f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
f""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
__UpperCAmelCase = isinstance(__a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
__UpperCAmelCase = is_batched_numpy or (
isinstance(__a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__UpperCAmelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__a , np.ndarray ):
__UpperCAmelCase = np.asarray(__a , dtype=np.floataa )
elif isinstance(__a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__UpperCAmelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__UpperCAmelCase = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__UpperCAmelCase = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , __a ):
__UpperCAmelCase = [np.asarray(__a , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
__UpperCAmelCase = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__UpperCAmelCase = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__UpperCAmelCase = np.array(__a ).astype(np.floataa )
# convert into correct format for padding
__UpperCAmelCase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__UpperCAmelCase = np.ones([len(__a ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
__UpperCAmelCase = padded_audio_features * self.padding_value
for i in range(len(__a ) ):
__UpperCAmelCase = audio_features[i]
__UpperCAmelCase = feature
# return as BatchFeature
if return_attention_mask:
__UpperCAmelCase = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
else:
__UpperCAmelCase = {'''audio_values''': padded_audio_features}
__UpperCAmelCase = BatchFeature(data=__a , tensor_type=__a )
return encoded_inputs
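# Illustrative sketch (simplified, not part of the extractor above): padding
# variable-length feature matrices to a shared, patch-aligned time length and
# building the matching 0/1 attention mask, mirroring the batching logic of
# __call__. The shapes and the 16-frame patch size are assumptions for the demo.
def _pad_and_mask_sketch(features, time_patch=16, feature_size=128, pad_value=0.0):
    from math import ceil
    import numpy as np
    max_patches = max(ceil(f.shape[0] / time_patch) for f in features)
    padded = np.full((len(features), max_patches * time_patch, feature_size), pad_value, dtype=np.float32)
    mask = np.zeros((len(features), max_patches), dtype=np.float32)
    for i, f in enumerate(features):
        padded[i, : f.shape[0]] = f
        mask[i, : ceil(f.shape[0] / time_patch)] = 1.0
    return padded, mask

import numpy as _np_sketch
_padded, _mask = _pad_and_mask_sketch([_np_sketch.ones((40, 128)), _np_sketch.ones((20, 128))])
assert _padded.shape == (2, 48, 128) and _mask.tolist() == [[1.0, 1.0, 1.0], [1.0, 1.0, 0.0]]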
| 654 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger(__name__)
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
__UpperCAmelCase = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""" )
__UpperCAmelCase = in_proj_weight[
: encoder_config.hidden_size, :
]
__UpperCAmelCase = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
__UpperCAmelCase = in_proj_weight[
-encoder_config.hidden_size :, :
]
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = dct.pop(UpperCamelCase__ )
__UpperCAmelCase = val
def lowerCAmelCase ( UpperCamelCase__ : Dict ):
"""simple docstring"""
if "handwritten" in checkpoint_url:
__UpperCAmelCase = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCAmelCase = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
__UpperCAmelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert('''RGB''' )
return im
@torch.no_grad()
def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase = ViTConfig(image_size=3_8_4 , qkv_bias=UpperCamelCase__ )
__UpperCAmelCase = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
__UpperCAmelCase = 7_6_8
elif "large" in checkpoint_url:
# use ViT-large encoder
__UpperCAmelCase = 1_0_2_4
__UpperCAmelCase = 4_0_9_6
__UpperCAmelCase = 2_4
__UpperCAmelCase = 1_6
__UpperCAmelCase = 1_0_2_4
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, with no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCAmelCase = False
__UpperCAmelCase = '''relu'''
__UpperCAmelCase = 1_0_2_4
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = False
# load HuggingFace model
__UpperCAmelCase = ViTModel(UpperCamelCase__ , add_pooling_layer=UpperCamelCase__ )
__UpperCAmelCase = TrOCRForCausalLM(UpperCamelCase__ )
__UpperCAmelCase = VisionEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
model.eval()
# load state_dict of original model, rename some keys
__UpperCAmelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='''cpu''' , check_hash=UpperCamelCase__ )['''model''']
__UpperCAmelCase = create_rename_keys(UpperCamelCase__ , UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
__UpperCAmelCase = state_dict.pop(UpperCamelCase__ )
if key.startswith('''decoder''' ) and "output_projection" not in key:
__UpperCAmelCase = val
else:
__UpperCAmelCase = val
# load state dict
model.load_state_dict(UpperCamelCase__ )
# Check outputs on an image
__UpperCAmelCase = ViTImageProcessor(size=encoder_config.image_size )
__UpperCAmelCase = RobertaTokenizer.from_pretrained('''roberta-large''' )
__UpperCAmelCase = TrOCRProcessor(UpperCamelCase__ , UpperCamelCase__ )
__UpperCAmelCase = processor(images=prepare_img(UpperCamelCase__ ) , return_tensors='''pt''' ).pixel_values
# verify logits
__UpperCAmelCase = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
__UpperCAmelCase = model(pixel_values=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ )
__UpperCAmelCase = outputs.logits
__UpperCAmelCase = torch.Size([1, 1, 5_0_2_6_5] )
if "trocr-base-handwritten" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
elif "trocr-large-handwritten" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
elif "trocr-base-printed" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
elif "trocr-large-printed" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :1_0] , UpperCamelCase__ , atol=1E-3 ), "First elements of logits not as expected"
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__lowerCAmelCase : Optional[int] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
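# Illustrative sketch of the fused-QKV split performed in read_in_q_k_v above:
# a single in_proj weight of shape (3 * hidden, hidden) is sliced row-wise into
# query, key and value matrices. hidden = 4 is an arbitrary toy size.
def _qkv_split_sketch():
    hidden = 4
    in_proj_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    q = in_proj_weight[:hidden, :]
    k = in_proj_weight[hidden : hidden * 2, :]
    v = in_proj_weight[-hidden:, :]
    assert torch.equal(torch.cat([q, k, v], dim=0), in_proj_weight)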
| 654 | 1 |
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
def lowerCAmelCase ( UpperCamelCase__ : int = 1_0_0_0_0_0_0 , UpperCamelCase__ : int = 1_0 ):
"""simple docstring"""
__UpperCAmelCase = defaultdict(UpperCamelCase__ )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
__UpperCAmelCase = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
__UpperCAmelCase = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(UpperCamelCase__ , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 1_0 )
if __name__ == "__main__":
print(F"""{solution() = }""")
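# Illustrative brute-force cross-check (an addition, not part of the solution
# above): a square lamina with outer side o and hole side h of the same parity,
# 1 <= h <= o - 2, uses o * o - h * h tiles; the thinnest lamina uses 4 * o - 4.
def _laminae_counts_bruteforce(t_limit):
    from collections import defaultdict
    count = defaultdict(int)
    for o in range(3, t_limit + 2):
        if 4 * o - 4 > t_limit:  # even the thinnest lamina is too big
            break
        for h in range(o - 2, 0, -2):
            tiles = o * o - h * h
            if tiles > t_limit:
                break
            count[tiles] += 1
    return count

assert dict(_laminae_counts_bruteforce(8)) == {8: 1}  # only the 3x3 lamina with a 1x1 hole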
| 654 |
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class A ( unittest.TestCase ):
def snake_case__ ( self : List[Any] , __a : List[str] , __a : Optional[Any] ) -> List[Any]:
return f"""gaussian_noise_s={seed}_shape={'_'.join([str(__a ) for s in shape] )}.npy"""
def snake_case__ ( self : Dict ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def snake_case__ ( self : Optional[Any] , __a : Tuple=0 , __a : List[Any]=(4, 4, 6_4, 6_4) , __a : Optional[Any]=False ) -> Tuple:
__UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a )
return image
def snake_case__ ( self : int , __a : Optional[Any]=False , __a : Optional[Any]="CompVis/stable-diffusion-v1-4" ) -> Any:
__UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__UpperCAmelCase = '''bf16''' if fpaa else None
__UpperCAmelCase , __UpperCAmelCase = FlaxUNetaDConditionModel.from_pretrained(
__a , subfolder='''unet''' , dtype=__a , revision=__a )
return model, params
def snake_case__ ( self : str , __a : int=0 , __a : Tuple=(4, 7_7, 7_6_8) , __a : Optional[int]=False ) -> Union[str, Any]:
__UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a )
return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[1_7, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 1_0_0_0, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
def snake_case__ ( self : Tuple , __a : Tuple , __a : str , __a : Optional[Any] ) -> Any:
__UpperCAmelCase , __UpperCAmelCase = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__a )
__UpperCAmelCase = self.get_latents(__a , fpaa=__a )
__UpperCAmelCase = self.get_encoder_hidden_states(__a , fpaa=__a )
__UpperCAmelCase = model.apply(
{'''params''': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample
assert sample.shape == latents.shape
__UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__UpperCAmelCase = jnp.array(__a , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__a , __a , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[1_7, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 1_0_0_0, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
] )
def snake_case__ ( self : Optional[Any] , __a : Optional[int] , __a : Optional[Any] , __a : Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__a )
__UpperCAmelCase = self.get_latents(__a , shape=(4, 4, 9_6, 9_6) , fpaa=__a )
__UpperCAmelCase = self.get_encoder_hidden_states(__a , shape=(4, 7_7, 1_0_2_4) , fpaa=__a )
__UpperCAmelCase = model.apply(
{'''params''': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample
assert sample.shape == latents.shape
__UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__UpperCAmelCase = jnp.array(__a , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__a , __a , atol=1e-2 )
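# Illustrative self-contained sketch of the slice-based regression pattern used
# above: flatten a corner of the output and compare the first values against
# hard-coded expectations within a tolerance. The numbers here are synthetic.
def _expected_slice_sketch():
    import numpy as np
    output = np.linspace(-1.0, 1.0, num=16, dtype=np.float32).reshape(4, 4)
    output_slice = output[-2:, -2:].flatten()
    expected_slice = np.array([0.3333, 0.4667, 0.8667, 1.0], dtype=np.float32)
    assert np.allclose(output_slice, expected_slice, atol=1e-2)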
| 654 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowerCAmelCase ( UpperCamelCase__ : Union[List, PIL.Image.Image, torch.Tensor] ):
"""simple docstring"""
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''' , UpperCamelCase__ , )
if isinstance(UpperCamelCase__ , torch.Tensor ):
return image
elif isinstance(UpperCamelCase__ , PIL.Image.Image ):
__UpperCAmelCase = [image]
if isinstance(image[0] , PIL.Image.Image ):
__UpperCAmelCase , __UpperCAmelCase = image[0].size
__UpperCAmelCase , __UpperCAmelCase = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
__UpperCAmelCase = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
__UpperCAmelCase = np.concatenate(UpperCamelCase__ , axis=0 )
__UpperCAmelCase = np.array(UpperCamelCase__ ).astype(np.floataa ) / 2_55.0
__UpperCAmelCase = image.transpose(0 , 3 , 1 , 2 )
__UpperCAmelCase = 2.0 * image - 1.0
__UpperCAmelCase = torch.from_numpy(UpperCamelCase__ )
elif isinstance(image[0] , torch.Tensor ):
__UpperCAmelCase = torch.cat(UpperCamelCase__ , dim=0 )
return image
def lowerCAmelCase ( UpperCamelCase__ : Union[List, PIL.Image.Image, torch.Tensor] ):
"""simple docstring"""
if isinstance(UpperCamelCase__ , torch.Tensor ):
return mask
elif isinstance(UpperCamelCase__ , PIL.Image.Image ):
__UpperCAmelCase = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
__UpperCAmelCase , __UpperCAmelCase = mask[0].size
__UpperCAmelCase , __UpperCAmelCase = (x - x % 3_2 for x in (w, h)) # resize to integer multiple of 32
__UpperCAmelCase = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
__UpperCAmelCase = np.concatenate(UpperCamelCase__ , axis=0 )
__UpperCAmelCase = mask.astype(np.floataa ) / 2_55.0
__UpperCAmelCase = 0
__UpperCAmelCase = 1
__UpperCAmelCase = torch.from_numpy(UpperCamelCase__ )
elif isinstance(mask[0] , torch.Tensor ):
__UpperCAmelCase = torch.cat(UpperCamelCase__ , dim=0 )
return mask
class A ( UpperCAmelCase ):
a_ = 42
a_ = 42
def __init__( self : Union[str, Any] , __a : Optional[int] , __a : Dict ) -> int:
super().__init__()
self.register_modules(unet=__a , scheduler=__a )
@torch.no_grad()
def __call__( self : Tuple , __a : Union[torch.Tensor, PIL.Image.Image] , __a : Union[torch.Tensor, PIL.Image.Image] , __a : int = 2_5_0 , __a : float = 0.0 , __a : int = 1_0 , __a : int = 1_0 , __a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __a : Optional[str] = "pil" , __a : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
__UpperCAmelCase = image
__UpperCAmelCase = _preprocess_image(__a )
__UpperCAmelCase = original_image.to(device=self.device , dtype=self.unet.dtype )
__UpperCAmelCase = _preprocess_mask(__a )
__UpperCAmelCase = mask_image.to(device=self.device , dtype=self.unet.dtype )
__UpperCAmelCase = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(__a , __a ) and len(__a ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(__a )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
__UpperCAmelCase = original_image.shape
__UpperCAmelCase = randn_tensor(__a , generator=__a , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__a , __a , __a , self.device )
__UpperCAmelCase = eta
__UpperCAmelCase = self.scheduler.timesteps[0] + 1
__UpperCAmelCase = generator[0] if isinstance(__a , __a ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
__UpperCAmelCase = self.unet(__a , __a ).sample
# compute previous image: x_t -> x_t-1
__UpperCAmelCase = self.scheduler.step(__a , __a , __a , __a , __a , __a ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
__UpperCAmelCase = self.scheduler.undo_step(__a , __a , __a )
__UpperCAmelCase = t
__UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
__UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__UpperCAmelCase = self.numpy_to_pil(__a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__a )
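# Illustrative sketch of the two preprocessing conventions above: pixel values
# are rescaled from [0, 255] to [-1, 1] for the model, while mask values are
# binarized to {0, 1} with a 0.5 threshold.
def _preprocess_sketch():
    import numpy as np
    pixels = np.array([[0, 127, 255]], dtype=np.uint8)
    image = 2.0 * (pixels.astype(np.float32) / 255.0) - 1.0
    mask = (pixels.astype(np.float32) / 255.0 >= 0.5).astype(np.float32)
    assert image.min() == -1.0 and image.max() == 1.0
    assert mask.tolist() == [[0.0, 0.0, 1.0]]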
| 654 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__lowerCAmelCase : Optional[int] = "examples/"
__lowerCAmelCase : Dict = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__lowerCAmelCase : List[str] = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__lowerCAmelCase : int = "README.md"
def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple ):
"""simple docstring"""
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__UpperCAmelCase = f.read()
__UpperCAmelCase , __UpperCAmelCase = REPLACE_PATTERNS[pattern]
__UpperCAmelCase = replace.replace('''VERSION''' , UpperCamelCase__ )
__UpperCAmelCase = re_pattern.sub(UpperCamelCase__ , UpperCamelCase__ )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(UpperCamelCase__ )
def lowerCAmelCase ( UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
for folder, directories, fnames in os.walk(UpperCamelCase__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ , pattern='''examples''' )
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Any=False ):
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not patch:
update_version_in_examples(UpperCamelCase__ )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = '''🤗 Transformers currently provides the following architectures'''
__UpperCAmelCase = '''1. Want to contribute a new model?'''
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__UpperCAmelCase = f.readlines()
# Find the start of the list.
__UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
__UpperCAmelCase = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(UpperCamelCase__ )
def lowerCAmelCase ( ):
"""simple docstring"""
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
__UpperCAmelCase = f.read()
__UpperCAmelCase = REPLACE_PATTERNS['''init'''][0].search(UpperCamelCase__ ).groups()[0]
return packaging.version.parse(UpperCamelCase__ )
def lowerCAmelCase ( UpperCamelCase__ : Any=False ):
"""simple docstring"""
__UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
__UpperCAmelCase = default_version.base_version
elif patch:
__UpperCAmelCase = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
__UpperCAmelCase = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
__UpperCAmelCase = input(f"""Which version are you releasing? [{default_version}]""" )
if len(UpperCamelCase__ ) == 0:
__UpperCAmelCase = default_version
print(f"""Updating version to {version}.""" )
global_version_update(UpperCamelCase__ , patch=UpperCamelCase__ )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = get_version()
__UpperCAmelCase = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
__UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
__UpperCAmelCase = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(UpperCamelCase__ ) == 0:
__UpperCAmelCase = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(UpperCamelCase__ )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__lowerCAmelCase : Tuple = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
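# Illustrative sketch of the version arithmetic above, using packaging.version
# to derive the release version from a dev version and the next dev version.
def _version_bump_sketch():
    from packaging.version import parse
    v = parse("4.30.0.dev0")
    assert v.is_devrelease and v.base_version == "4.30.0"
    assert f"{v.major}.{v.minor + 1}.0.dev0" == "4.31.0.dev0"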
| 654 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def lowerCAmelCase ( UpperCamelCase__ : float , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = u
for i in range(1 , UpperCamelCase__ ):
__UpperCAmelCase = temp * (u - i)
return temp
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = int(input('''enter the numbers of values: ''' ) )
__UpperCAmelCase = []
for _ in range(UpperCamelCase__ ):
y.append([] )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
y[i].append(UpperCamelCase__ )
__UpperCAmelCase = 0
print('''enter the values of parameters in a list: ''' )
__UpperCAmelCase = list(map(UpperCamelCase__ , input().split() ) )
print('''enter the values of corresponding parameters: ''' )
for i in range(UpperCamelCase__ ):
__UpperCAmelCase = float(input() )
__UpperCAmelCase = int(input('''enter the value to interpolate: ''' ) )
__UpperCAmelCase = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , UpperCamelCase__ ):
for j in range(n - i ):
__UpperCAmelCase = y[j + 1][i - 1] - y[j][i - 1]
__UpperCAmelCase = y[0][0]
for i in range(1 , UpperCamelCase__ ):
summ += (ucal(UpperCamelCase__ , UpperCamelCase__ ) * y[0][i]) / math.factorial(UpperCamelCase__ )
print(f"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
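# Illustrative non-interactive worked example of the same forward-difference
# scheme, for f(x) = x**2 sampled at x = 0, 1, 2, 3 and interpolated at x = 1.5.
def _newton_forward_sketch():
    x = [0.0, 1.0, 2.0, 3.0]
    table = [[0.0, 1.0, 4.0, 9.0]]  # row k holds the k-th forward differences
    for k in range(1, 4):
        table.append([table[k - 1][j + 1] - table[k - 1][j] for j in range(4 - k)])
    u = (1.5 - x[0]) / (x[1] - x[0])
    result, u_term = table[0][0], 1.0
    for k in range(1, 4):
        u_term *= u - (k - 1)
        result += u_term * table[k][0] / math.factorial(k)
    assert abs(result - 2.25) < 1e-9  # 1.5 ** 2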
| 654 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : Tuple ):
"""simple docstring"""
    # if the collection is empty, return an empty list
if collection == []:
return []
# get some information about the collection
__UpperCAmelCase = len(UpperCamelCase__ )
__UpperCAmelCase = max(UpperCamelCase__ )
__UpperCAmelCase = min(UpperCamelCase__ )
# create the counting array
__UpperCAmelCase = coll_max + 1 - coll_min
__UpperCAmelCase = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i + coll_min there are in the collection
for i in range(1 , UpperCamelCase__ ):
__UpperCAmelCase = counting_arr[i] + counting_arr[i - 1]
# create the output collection
__UpperCAmelCase = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to beginning, updating counting_arr
for i in reversed(range(0 , UpperCamelCase__ ) ):
__UpperCAmelCase = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def lowerCAmelCase ( UpperCamelCase__ : Any ):
"""simple docstring"""
return "".join([chr(UpperCamelCase__ ) for i in counting_sort([ord(UpperCamelCase__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
__lowerCAmelCase : str = input("Enter numbers separated by a comma:\n").strip()
__lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
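# Illustrative sketch of the prefix-sum step above: after accumulation,
# counting_arr[i] holds the number of elements <= i + coll_min, i.e. the
# 1-based position boundary of each key in the sorted output.
def _prefix_sum_sketch():
    from itertools import accumulate
    counts = [2, 0, 3, 1]  # raw key frequencies
    assert list(accumulate(counts)) == [2, 2, 5, 6]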
| 654 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class A :
def __init__( self : Union[str, Any] ) -> str:
__UpperCAmelCase = {}
def snake_case__ ( self : Any , __a : str ) -> None:
__UpperCAmelCase = {}
def snake_case__ ( self : Optional[Any] , __a : str , __a : str , __a : float ) -> None:
if nodea not in self.connections:
self.add_node(__a )
if nodea not in self.connections:
self.add_node(__a )
__UpperCAmelCase = probability
def snake_case__ ( self : Dict ) -> list[str]:
return list(self.connections )
def snake_case__ ( self : Any , __a : str ) -> str:
__UpperCAmelCase = 0
__UpperCAmelCase = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : list[tuple[str, str, float]] , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__UpperCAmelCase = Counter(graph.get_nodes() )
__UpperCAmelCase = start
for _ in range(UpperCamelCase__ ):
__UpperCAmelCase = graph.transition(UpperCamelCase__ )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
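# Illustrative self-contained sketch of the sampling step above: the next node
# is chosen by accumulating edge probabilities against a single uniform draw.
def _random_walk_sketch(steps=1000):
    from random import random
    transitions = {"a": {"a": 0.9, "b": 0.1}, "b": {"a": 0.5, "b": 0.5}}
    counts, node = {"a": 0, "b": 0}, "a"
    for _ in range(steps):
        threshold, acc = random(), 0.0
        for dest, probability in transitions[node].items():
            acc += probability
            if acc > threshold:
                node = dest
                break
        counts[node] += 1
    return counts  # roughly 5:1 in favour of "a" (stationary distribution 5/6, 1/6)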
| 654 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def lowerCAmelCase ( UpperCamelCase__ : str = "AAPL" ):
"""simple docstring"""
__UpperCAmelCase = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
__UpperCAmelCase = BeautifulSoup(requests.get(UpperCamelCase__ ).text , '''html.parser''' )
__UpperCAmelCase = '''My(6px) Pos(r) smartphone_Mt(6px)'''
return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
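# Illustrative offline sketch of the class-based lookup above, run against
# static HTML with the real bs4 package (the live Yahoo markup may change, so
# the selector string is only an example).
def _scrape_sketch():
    from bs4 import BeautifulSoup
    html = '<div class="My(6px) Pos(r) smartphone_Mt(6px)"><span>123.45</span></div>'
    soup = BeautifulSoup(html, "html.parser")
    assert soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)").find("span").text == "123.45"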
| 654 | 1 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : int , UpperCamelCase__ : int ):
"""simple docstring"""
return number | (1 << position)
def lowerCAmelCase ( UpperCamelCase__ : int , UpperCamelCase__ : int ):
"""simple docstring"""
return number & ~(1 << position)
def lowerCAmelCase ( UpperCamelCase__ : int , UpperCamelCase__ : int ):
"""simple docstring"""
return number ^ (1 << position)
def lowerCAmelCase ( UpperCamelCase__ : int , UpperCamelCase__ : int ):
"""simple docstring"""
return ((number >> position) & 1) == 1
def lowerCAmelCase ( UpperCamelCase__ : int , UpperCamelCase__ : int ):
"""simple docstring"""
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
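# Quick worked example of the helpers above on 0b0101 (decimal 5):
assert 0b0101 | (1 << 1) == 0b0111  # set bit 1
assert 0b0101 & ~(1 << 2) == 0b0001  # clear bit 2
assert 0b0101 ^ (1 << 0) == 0b0100  # flip bit 0
assert ((0b0101 >> 2) & 1) == 1 and ((0b0101 >> 1) & 1) == 0  # bit 2 set, bit 1 not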
| 654 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def lowerCAmelCase ( UpperCamelCase__ : list[int] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = [0] * no_of_processes
__UpperCAmelCase = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
for i in range(UpperCamelCase__ ):
__UpperCAmelCase = burst_time[i]
__UpperCAmelCase = []
__UpperCAmelCase = 0
__UpperCAmelCase = 0
# When processes are not completed,
    # A process whose arrival time has passed
    # and that still has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
while completed != no_of_processes:
__UpperCAmelCase = []
__UpperCAmelCase = -1
for i in range(UpperCamelCase__ ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
__UpperCAmelCase = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
__UpperCAmelCase = i
total_time += burst_time[target_process]
completed += 1
__UpperCAmelCase = 0
__UpperCAmelCase = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def lowerCAmelCase ( UpperCamelCase__ : list[int] , UpperCamelCase__ : int , UpperCamelCase__ : list[int] ):
"""simple docstring"""
__UpperCAmelCase = [0] * no_of_processes
for i in range(UpperCamelCase__ ):
__UpperCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("[TEST CASE 01]")
__lowerCAmelCase : List[Any] = 4
__lowerCAmelCase : List[Any] = [2, 5, 3, 7]
__lowerCAmelCase : Tuple = [0, 0, 0, 0]
__lowerCAmelCase : Optional[int] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__lowerCAmelCase : Dict = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
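# Hand-checked schedule for the test case above: with all arrivals at t = 0,
# SRTF reduces to shortest-job-first, so the run order is P1, P3, P2, P4 with
# completion times 2, 5, 10, 17; waiting time = completion - burst.
_completion = {1: 2, 2: 10, 3: 5, 4: 17}
_burst = {1: 2, 2: 5, 3: 3, 4: 7}
assert {p: _completion[p] - _burst[p] for p in _completion} == {1: 0, 2: 5, 3: 2, 4: 10}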
| 654 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(UpperCAmelCase ) , '''Tatoeba directory does not exist.''' )
class A ( unittest.TestCase ):
@cached_property
def snake_case__ ( self : Dict ) -> str:
__UpperCAmelCase = tempfile.mkdtemp()
return TatoebaConverter(save_dir=__a )
@slow
def snake_case__ ( self : List[Any] ) -> Tuple:
self.resolver.convert_models(['''heb-eng'''] )
@slow
def snake_case__ ( self : List[Any] ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=__a )
assert mmeta["long_pair"] == "heb-eng"
| 654 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : List[str] , **__a : Union[str, Any] ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : List[str] , **__a : int ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : int , **__a : List[Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Optional[Any] , **__a : Tuple ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Tuple , **__a : Optional[Any] ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : str , **__a : Tuple ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : List[str] , **__a : int ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : List[str] , **__a : Optional[int] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : Any ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : Dict , **__a : List[str] ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[str] , **__a : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Optional[int] , **__a : Optional[int] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[str] , **__a : List[str] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Optional[int] , **__a : Dict ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Any ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[Any] , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Optional[int] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : str ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Any ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : str , **__a : Tuple ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[str] , **__a : Optional[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Optional[int] , **__a : Union[str, Any] ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Union[str, Any] , **__a : List[str] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Union[str, Any] , **__a : Any ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : int , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : int ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Tuple , **__a : str ) -> Dict:
requires_backends(cls , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[Any] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Any , **UpperCamelCase__ : List[str] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : str , **UpperCamelCase__ : str ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Dict , **UpperCamelCase__ : Dict ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Tuple , **UpperCamelCase__ : int ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Tuple ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : str , **__a : List[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : int , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : str , **__a : List[str] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : List[Any] , **__a : List[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : int ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Optional[Any] , **__a : Tuple ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : str , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : str , **__a : str ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : str ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : int , **__a : Tuple ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Tuple , **__a : str ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Dict ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : str , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : int , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : str , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : int , **__a : List[str] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Union[str, Any] , **__a : Optional[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : List[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Optional[Any] , **__a : Any ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : List[str] , **__a : Dict ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : int , **__a : Dict ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : List[str] , **__a : Union[str, Any] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : Dict ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Tuple , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : str , **__a : Any ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Dict , **__a : Optional[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Optional[Any] , **__a : List[str] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Union[str, Any] , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : Optional[int] , **__a : List[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : List[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Any , **__a : str ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : int , **__a : Optional[Any] ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Tuple , **__a : Optional[int] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : Tuple ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Union[str, Any] , **__a : List[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : List[Any] , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[Any] , *__a : Optional[int] , **__a : int ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : int , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Any , **__a : int ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Union[str, Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Dict ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[Any] , *__a : int , **__a : Optional[int] ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Tuple , **__a : List[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Dict , **__a : Optional[int] ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Any , **__a : Dict ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : Tuple , **__a : Optional[int] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Optional[Any] , **__a : Optional[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : Dict ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Union[str, Any] , **__a : Optional[int] ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Any , **__a : Optional[int] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Union[str, Any] , **__a : List[str] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : List[Any] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Dict , **__a : int ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : Union[str, Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : int ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Optional[Any] , **__a : int ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[Any] , **__a : Optional[int] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Optional[Any] , **__a : Optional[int] ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[int] , **__a : List[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : List[str] , **__a : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Tuple , **__a : Tuple ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : Tuple ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[str] , **__a : int ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Tuple , **__a : Optional[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Any , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : int , **__a : int ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : int , **__a : str ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : List[str] , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : str , **__a : Optional[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : List[Any] , **__a : List[str] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Optional[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[Any] , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : List[Any] , **__a : Optional[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[str] , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : str , **__a : Any ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Dict , **__a : Tuple ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Any , **__a : Optional[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : int , **__a : List[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Tuple ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : int , **__a : Optional[Any] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Optional[int] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[str] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : int , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : List[str] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
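# Illustrative minimal sketch of the dummy-object pattern implemented above: a
# placeholder that raises a helpful ImportError whenever the missing backend
# would actually be needed. The class and method names here are made up.
class _DummyTorchModelSketch:
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        raise ImportError(f"{type(self).__name__} requires the {self._backends} backend(s).")

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires the {cls._backends} backend(s).")

try:
    _DummyTorchModelSketch()
except ImportError as _err:
    assert "torch" in str(_err)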
| 654 | 1 |
'''simple docstring'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class A ( unittest.TestCase ):
def __init__( self : Tuple , __a : Dict , __a : Optional[int]=2 , __a : List[Any]=5_6 , __a : int=True , __a : Union[str, Any]=True , __a : str=True , __a : Optional[int]=True , __a : Tuple=9_9 , __a : List[Any]=3_2 , __a : Optional[Any]=2 , __a : str=2 , __a : Any=7 , __a : Union[str, Any]="gelu_new" , __a : str=0.1 , __a : Optional[Any]=0.1 , __a : List[str]=5_1_2 , __a : Union[str, Any]=1_6 , __a : Tuple=2 , __a : Any=0.0_2 , __a : List[str]=4 , __a : Union[str, Any]="block_sparse" , __a : Optional[Any]=True , __a : Any=False , __a : List[Any]=2 , __a : Dict=3 , ) -> Tuple:
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = seq_length
__UpperCAmelCase = is_training
__UpperCAmelCase = use_attention_mask
__UpperCAmelCase = use_token_type_ids
__UpperCAmelCase = use_labels
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = type_sequence_label_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = num_choices
__UpperCAmelCase = rescale_embeddings
__UpperCAmelCase = attention_type
__UpperCAmelCase = use_bias
__UpperCAmelCase = block_size
__UpperCAmelCase = num_random_blocks
def snake_case__ ( self : Union[str, Any] ) -> Dict:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = None
if self.use_attention_mask:
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = None
if self.use_token_type_ids:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def snake_case__ ( self : Dict ) -> Optional[Any]:
__UpperCAmelCase = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = config_and_inputs
__UpperCAmelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_flax
class A ( UpperCAmelCase , unittest.TestCase ):
a_ = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
a_ = False
a_ = False
def snake_case__ ( self : Any ) -> int:
__UpperCAmelCase = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self : Any ) -> Dict:
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self : int ) -> str:
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self : Union[str, Any] ) -> str:
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self : Any ) -> Tuple:
super().test_hidden_states_output()
@slow
def snake_case__ ( self : List[str] ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
__UpperCAmelCase = model_class_name.from_pretrained('''google/bigbird-roberta-base''' )
self.assertIsNotNone(__a )
def snake_case__ ( self : List[Any] ) -> Optional[int]:
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self : Tuple ) -> Optional[Any]:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCAmelCase = self._prepare_for_class(__a , __a )
__UpperCAmelCase = model_class(__a )
@jax.jit
def model_jitted(__a : Any , __a : Any=None , **__a : List[str] ):
return model(input_ids=__a , attention_mask=__a , **__a )
with self.subTest('''JIT Enabled''' ):
__UpperCAmelCase = model_jitted(**__a ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__UpperCAmelCase = model_jitted(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) )
for jitted_output, output in zip(__a , __a ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case__ ( self : Optional[int] , __a : Any , __a : Dict , __a : Optional[int] , __a : Tuple=1e-5 , __a : Tuple="outputs" , __a : List[str]=None ) -> List[Any]:
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in the PyTorch version,
        # an effort was made to return `attention_probs` (yet to be verified).
if name.startswith('''outputs.attentions''' ):
return
else:
super().check_pt_flax_outputs(__a , __a , __a , __a , __a , __a )
| 654 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 654 | 1 |
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase ( UpperCamelCase__ : list[int] ): # This function is recursive
"""simple docstring"""
__UpperCAmelCase = len(UpperCamelCase__ )
    # If the array contains only one element, we return it (this is the base case of
    # the recursion)
if array_length <= 1:
return array
# Else
__UpperCAmelCase = array[0]
__UpperCAmelCase = False
__UpperCAmelCase = 1
__UpperCAmelCase = []
while not is_found and i < array_length:
if array[i] < pivot:
__UpperCAmelCase = True
__UpperCAmelCase = [element for element in array[i:] if element >= array[i]]
__UpperCAmelCase = longest_subsequence(UpperCamelCase__ )
if len(UpperCamelCase__ ) > len(UpperCamelCase__ ):
__UpperCAmelCase = temp_array
else:
i += 1
__UpperCAmelCase = [element for element in array[1:] if element >= pivot]
__UpperCAmelCase = [pivot, *longest_subsequence(UpperCamelCase__ )]
if len(UpperCamelCase__ ) > len(UpperCamelCase__ ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
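    # Illustrative call with an assumed input (not part of the original module):
    # longest_subsequence([1, 3, 2, 4]) returns [1, 2, 4], a longest non-decreasing
    # subsequence of the input.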
| 654 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Optional[Any] = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
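    # `_LazyModule` defers the heavy submodule imports until an attribute such as
    # `LlamaConfig` or `LlamaModel` is first accessed on the package.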
| 654 | 1 |
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowerCAmelCase : Optional[Any] = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class A ( UpperCAmelCase ):
def __init__( self : Any , __a : str , __a : Optional[Any] , __a : str=None , __a : Union[str, Any]=1 ) -> List[str]:
__UpperCAmelCase = tokenizer
__UpperCAmelCase = dataset
__UpperCAmelCase = len(__a ) if n_tasks is None else n_tasks
__UpperCAmelCase = n_copies
def __iter__( self : Union[str, Any] ) -> Dict:
__UpperCAmelCase = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
__UpperCAmelCase = self.tokenizer(__a , padding=__a , return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class A ( UpperCAmelCase ):
def __init__( self : Dict , __a : List[Any] , __a : Optional[int] , __a : Optional[Any] ) -> Tuple:
__UpperCAmelCase = start_length
__UpperCAmelCase = eof_strings
__UpperCAmelCase = tokenizer
def __call__( self : Optional[Any] , __a : List[Any] , __a : Union[str, Any] , **__a : List[Any] ) -> Tuple:
__UpperCAmelCase = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
__UpperCAmelCase = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__a )
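# The stopping criteria above returns True only once every sequence in the batch
# contains one of the EOF strings in the portion decoded beyond `start_length`.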
def lowerCAmelCase ( UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = re.split('''(%s)''' % '''|'''.join(UpperCamelCase__ ) , UpperCamelCase__ )
# last string should be ""
return "".join(string_list[:-2] )
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int=2_0 , **UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase = defaultdict(UpperCamelCase__ ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(UpperCamelCase__ ) ):
with torch.no_grad():
__UpperCAmelCase = batch['''ids'''].shape[-1]
__UpperCAmelCase = accelerator.unwrap_model(UpperCamelCase__ ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=UpperCamelCase__ , **UpperCamelCase__ )
# each task is generated batch_size times
__UpperCAmelCase = batch['''task_id'''].repeat(UpperCamelCase__ )
__UpperCAmelCase = accelerator.pad_across_processes(
UpperCamelCase__ , dim=1 , pad_index=tokenizer.pad_token_id )
__UpperCAmelCase , __UpperCAmelCase = accelerator.gather((generated_tokens, generated_tasks) )
__UpperCAmelCase = generated_tokens.cpu().numpy()
__UpperCAmelCase = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(UpperCamelCase__ , UpperCamelCase__ ):
gen_token_dict[task].append(UpperCamelCase__ )
__UpperCAmelCase = [[] for _ in range(UpperCamelCase__ )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
__UpperCAmelCase = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
code_gens[task].append(remove_last_block(UpperCamelCase__ ) )
return code_gens
def lowerCAmelCase ( ):
"""simple docstring"""
# Setup configuration
__UpperCAmelCase = HfArgumentParser(UpperCamelCase__ )
__UpperCAmelCase = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
__UpperCAmelCase = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
__UpperCAmelCase = '''false'''
if args.num_workers is None:
__UpperCAmelCase = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
__UpperCAmelCase = Accelerator()
set_seed(args.seed , device_specific=UpperCamelCase__ )
# Load model and tokenizer
__UpperCAmelCase = AutoTokenizer.from_pretrained(args.model_ckpt )
__UpperCAmelCase = tokenizer.eos_token
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
__UpperCAmelCase = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , UpperCamelCase__ , UpperCamelCase__ )] ),
}
# Load evaluation dataset and metric
__UpperCAmelCase = load_dataset('''openai_humaneval''' )
__UpperCAmelCase = load_metric('''code_eval''' )
__UpperCAmelCase = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
__UpperCAmelCase = args.n_samples // args.batch_size
__UpperCAmelCase = TokenizedDataset(UpperCamelCase__ , human_eval['''test'''] , n_copies=UpperCamelCase__ , n_tasks=UpperCamelCase__ )
    # note that args.batch_size is actually num_return_sequences per prompt, not the dataloader batch size
__UpperCAmelCase = DataLoader(UpperCamelCase__ , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
__UpperCAmelCase = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
__UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ )
__UpperCAmelCase = complete_code(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , n_tasks=UpperCamelCase__ , batch_size=args.batch_size , **UpperCamelCase__ , )
if accelerator.is_main_process:
__UpperCAmelCase = []
for task in tqdm(range(UpperCamelCase__ ) ):
__UpperCAmelCase = human_eval['''test'''][task]['''test''']
__UpperCAmelCase = f"""check({human_eval['test'][task]['entry_point']})"""
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
__UpperCAmelCase , __UpperCAmelCase = code_eval_metric.compute(
references=UpperCamelCase__ , predictions=UpperCamelCase__ , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 654 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , ):
"""simple docstring"""
__UpperCAmelCase = {}
if train_file is not None:
__UpperCAmelCase = [train_file]
if eval_file is not None:
__UpperCAmelCase = [eval_file]
if test_file is not None:
__UpperCAmelCase = [test_file]
__UpperCAmelCase = datasets.load_dataset('''csv''' , data_files=UpperCamelCase__ )
__UpperCAmelCase = list(ds[list(files.keys() )[0]].features.keys() )
__UpperCAmelCase = features_name.pop(UpperCamelCase__ )
__UpperCAmelCase = list(set(ds[list(files.keys() )[0]][label_name] ) )
__UpperCAmelCase = {label: i for i, label in enumerate(UpperCamelCase__ )}
__UpperCAmelCase = tokenizer.model_input_names
__UpperCAmelCase = {}
if len(UpperCamelCase__ ) == 1:
for k in files.keys():
__UpperCAmelCase = ds[k].map(
lambda UpperCamelCase__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' ) , batched=UpperCamelCase__ , )
elif len(UpperCamelCase__ ) == 2:
for k in files.keys():
__UpperCAmelCase = ds[k].map(
lambda UpperCamelCase__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' , ) , batched=UpperCamelCase__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
__UpperCAmelCase = (
tf.data.Dataset.from_generator(
UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__UpperCAmelCase = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__UpperCAmelCase = (
tf.data.Dataset.from_generator(
UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__UpperCAmelCase = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__UpperCAmelCase = (
tf.data.Dataset.from_generator(
UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__UpperCAmelCase = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
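# The helper above returns (train_ds, val_ds, test_ds, label2id): up to three tf.data
# datasets yielding ({input_name: ids, ...}, label) pairs, plus the label-to-id mapping.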
__lowerCAmelCase : List[Any] = logging.getLogger(__name__)
@dataclass
class A :
a_ = field(metadata={'''help''': '''Which column contains the label'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the training file'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the development file'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the test file'''} )
a_ = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class A :
a_ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def lowerCAmelCase ( ):
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCamelCase__ ) , labelaid=UpperCamelCase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__UpperCAmelCase = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , )
def compute_metrics(UpperCamelCase__ : EvalPrediction ) -> Dict:
__UpperCAmelCase = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__UpperCAmelCase = TFTrainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCAmelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__UpperCAmelCase = trainer.evaluate()
__UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(UpperCamelCase__ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(UpperCamelCase__ )
return results
if __name__ == "__main__":
main()
| 654 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def lowerCAmelCase ( UpperCamelCase__ : List[Any] ):
"""simple docstring"""
if "model" in orig_key:
__UpperCAmelCase = orig_key.replace('''model.''' , '''''' )
if "norm1" in orig_key:
__UpperCAmelCase = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
__UpperCAmelCase = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
if "norm" in orig_key:
__UpperCAmelCase = orig_key.replace('''norm''' , '''LayerNorm''' )
if "transformer" in orig_key:
__UpperCAmelCase = orig_key.split('''.''' )[0].split('''_''' )[-1]
__UpperCAmelCase = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" )
if "mha.attn" in orig_key:
__UpperCAmelCase = orig_key.replace('''mha.attn''' , '''attention.self''' )
if "mha" in orig_key:
__UpperCAmelCase = orig_key.replace('''mha''' , '''attention''' )
if "W_q" in orig_key:
__UpperCAmelCase = orig_key.replace('''W_q''' , '''self.query''' )
if "W_k" in orig_key:
__UpperCAmelCase = orig_key.replace('''W_k''' , '''self.key''' )
if "W_v" in orig_key:
__UpperCAmelCase = orig_key.replace('''W_v''' , '''self.value''' )
if "ff1" in orig_key:
__UpperCAmelCase = orig_key.replace('''ff1''' , '''intermediate.dense''' )
if "ff2" in orig_key:
__UpperCAmelCase = orig_key.replace('''ff2''' , '''output.dense''' )
if "ff" in orig_key:
__UpperCAmelCase = orig_key.replace('''ff''' , '''output.dense''' )
if "mlm_class" in orig_key:
__UpperCAmelCase = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
if "mlm" in orig_key:
__UpperCAmelCase = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
if "cls" not in orig_key:
__UpperCAmelCase = '''yoso.''' + orig_key
return orig_key
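# Illustrative rename with an assumed key name: 'model.transformer_0.ff1.weight'
# comes out as 'yoso.encoder.layer.0.intermediate.dense.weight'.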
def lowerCAmelCase ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__UpperCAmelCase = orig_state_dict.pop(UpperCamelCase__ )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
__UpperCAmelCase = val
__UpperCAmelCase = orig_state_dict['''cls.predictions.decoder.bias''']
__UpperCAmelCase = torch.arange(UpperCamelCase__ ).expand((1, -1) ) + 2
return orig_state_dict
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = torch.load(UpperCamelCase__ , map_location='''cpu''' )['''model_state_dict''']
__UpperCAmelCase = YosoConfig.from_json_file(UpperCamelCase__ )
__UpperCAmelCase = YosoForMaskedLM(UpperCamelCase__ )
__UpperCAmelCase = convert_checkpoint_helper(config.max_position_embeddings , UpperCamelCase__ )
print(model.load_state_dict(UpperCamelCase__ ) )
model.eval()
model.save_pretrained(UpperCamelCase__ )
print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
__lowerCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : Any = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 654 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class A :
def __init__( self : List[Any] , __a : Any , ) -> Dict:
__UpperCAmelCase = parent
__UpperCAmelCase = 1_3
__UpperCAmelCase = 7
__UpperCAmelCase = True
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = True
__UpperCAmelCase = 9_9
__UpperCAmelCase = 3_2
__UpperCAmelCase = 2
__UpperCAmelCase = 4
__UpperCAmelCase = 3_7
__UpperCAmelCase = '''gelu'''
__UpperCAmelCase = 0.1
__UpperCAmelCase = 0.1
__UpperCAmelCase = 5_1_2
__UpperCAmelCase = 1_6
__UpperCAmelCase = 2
__UpperCAmelCase = 0.0_2
__UpperCAmelCase = 3
__UpperCAmelCase = 4
__UpperCAmelCase = None
def snake_case__ ( self : Optional[int] ) -> Dict:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = None
if self.use_input_mask:
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self : Union[str, Any] , __a : List[str] , __a : int , __a : Union[str, Any] , __a : Union[str, Any] , __a : List[Any] , __a : int ) -> Any:
__UpperCAmelCase = TFDistilBertModel(config=__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
__UpperCAmelCase = [input_ids, input_mask]
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : Tuple , __a : List[Any] , __a : int , __a : Tuple , __a : List[Any] , __a : Union[str, Any] , __a : List[Any] ) -> int:
__UpperCAmelCase = TFDistilBertForMaskedLM(config=__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : Optional[int] , __a : Any , __a : Union[str, Any] , __a : Optional[int] , __a : int , __a : Optional[Any] , __a : Optional[int] ) -> Dict:
__UpperCAmelCase = TFDistilBertForQuestionAnswering(config=__a )
__UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self : Any , __a : Optional[Any] , __a : List[str] , __a : Dict , __a : Dict , __a : int , __a : List[Any] ) -> Dict:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = TFDistilBertForSequenceClassification(__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : Union[str, Any] , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[int] , __a : List[str] , __a : Dict ) -> str:
__UpperCAmelCase = self.num_choices
__UpperCAmelCase = TFDistilBertForMultipleChoice(__a )
__UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__ ( self : int , __a : Optional[Any] , __a : int , __a : Tuple , __a : int , __a : Optional[int] , __a : Optional[int] ) -> int:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = TFDistilBertForTokenClassification(__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : str ) -> Any:
__UpperCAmelCase = self.prepare_config_and_inputs()
((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) = config_and_inputs
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class A ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
a_ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
a_ = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ = False
a_ = False
def snake_case__ ( self : Any ) -> Any:
__UpperCAmelCase = TFDistilBertModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=__a , dim=3_7 )
def snake_case__ ( self : List[Any] ) -> Optional[int]:
self.config_tester.run_common_tests()
def snake_case__ ( self : Any ) -> str:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__a )
def snake_case__ ( self : Tuple ) -> Dict:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__a )
def snake_case__ ( self : Union[str, Any] ) -> Any:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__a )
def snake_case__ ( self : Optional[Any] ) -> Dict:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__a )
def snake_case__ ( self : Any ) -> int:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__a )
def snake_case__ ( self : List[str] ) -> List[Any]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__a )
@slow
def snake_case__ ( self : Dict ) -> Tuple:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__UpperCAmelCase = TFDistilBertModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_tf
class A ( unittest.TestCase ):
@slow
def snake_case__ ( self : int ) -> Dict:
__UpperCAmelCase = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
__UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCAmelCase = model(__a )[0]
__UpperCAmelCase = [1, 6, 7_6_8]
self.assertEqual(output.shape , __a )
__UpperCAmelCase = tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-4 )
| 654 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class A :
def __init__( self : str , __a : Union[str, Any] , __a : List[str]=1_3 , __a : List[Any]=7 , __a : Optional[Any]=True , __a : Tuple=True , __a : str=True , __a : Union[str, Any]=True , __a : int=9_9 , __a : Optional[int]=3_2 , __a : List[Any]=2 , __a : int=4 , __a : Tuple=3_7 , __a : Optional[int]="gelu" , __a : List[str]=0.1 , __a : int=0.1 , __a : str=5_1_2 , __a : Union[str, Any]=1_6 , __a : Tuple=2 , __a : int=0.0_2 , __a : Union[str, Any]=3 , __a : Any=4 , __a : Any=None , __a : Union[str, Any]=0 , ) -> Union[str, Any]:
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = seq_length
__UpperCAmelCase = is_training
__UpperCAmelCase = use_input_mask
__UpperCAmelCase = use_token_type_ids
__UpperCAmelCase = use_labels
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = type_sequence_label_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = num_labels
__UpperCAmelCase = num_choices
__UpperCAmelCase = scope
__UpperCAmelCase = projection_dim
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = None
if self.use_token_type_ids:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
__UpperCAmelCase = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self : List[Any] , __a : List[Any] , __a : str , __a : Dict , __a : Union[str, Any] , __a : List[Any] , __a : int , __a : Dict ) -> str:
__UpperCAmelCase = TFDPRContextEncoder(config=__a )
__UpperCAmelCase = model(__a , attention_mask=__a , token_type_ids=__a )
__UpperCAmelCase = model(__a , token_type_ids=__a )
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def snake_case__ ( self : Dict , __a : Union[str, Any] , __a : Tuple , __a : List[str] , __a : List[Any] , __a : int , __a : List[Any] , __a : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase = TFDPRQuestionEncoder(config=__a )
__UpperCAmelCase = model(__a , attention_mask=__a , token_type_ids=__a )
__UpperCAmelCase = model(__a , token_type_ids=__a )
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def snake_case__ ( self : Optional[int] , __a : List[str] , __a : int , __a : List[str] , __a : Tuple , __a : Optional[int] , __a : Dict , __a : Tuple ) -> Tuple:
__UpperCAmelCase = TFDPRReader(config=__a )
__UpperCAmelCase = model(__a , attention_mask=__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
__UpperCAmelCase = self.prepare_config_and_inputs()
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) = config_and_inputs
__UpperCAmelCase = {'''input_ids''': input_ids}
return config, inputs_dict
@require_tf
class A ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
a_ = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
a_ = {'''feature-extraction''': TFDPRQuestionEncoder} if is_tf_available() else {}
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
def snake_case__ ( self : List[Any] ) -> int:
__UpperCAmelCase = TFDPRModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=__a , hidden_size=3_7 )
def snake_case__ ( self : List[str] ) -> List[str]:
self.config_tester.run_common_tests()
def snake_case__ ( self : Union[str, Any] ) -> Tuple:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__a )
def snake_case__ ( self : Tuple ) -> List[Any]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__a )
def snake_case__ ( self : Optional[Any] ) -> int:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__a )
@slow
def snake_case__ ( self : List[Any] ) -> List[Any]:
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase = TFDPRContextEncoder.from_pretrained(__a )
self.assertIsNotNone(__a )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase = TFDPRContextEncoder.from_pretrained(__a )
self.assertIsNotNone(__a )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase = TFDPRQuestionEncoder.from_pretrained(__a )
self.assertIsNotNone(__a )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase = TFDPRReader.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_tf
class A ( unittest.TestCase ):
@slow
def snake_case__ ( self : int ) -> Dict:
__UpperCAmelCase = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
__UpperCAmelCase = tf.constant(
[[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] ) # [CLS] hello, is my dog cute? [SEP]
__UpperCAmelCase = model(__a )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
__UpperCAmelCase = tf.constant(
[
[
0.0_3_2_3_6_2_5_3,
0.1_2_7_5_3_3_3_5,
0.1_6_8_1_8_5_0_9,
0.0_0_2_7_9_7_8_6,
0.3_8_9_6_9_3_3,
0.2_4_2_6_4_9_4_5,
0.2_1_7_8_9_7_1,
-0.0_2_3_3_5_2_2_7,
-0.0_8_4_8_1_9_5_9,
-0.1_4_3_2_4_1_1_7,
]
] )
self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 654 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__lowerCAmelCase : List[Any] = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 654 | 1 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class A :
def __init__( self : Optional[int] , __a : Any , __a : List[Any]=1_3 , __a : Any=7 , __a : str=True , __a : Optional[Any]=True , __a : Optional[Any]=True , __a : Optional[int]=True , __a : Optional[Any]=9_9 , __a : Any=6_4 , __a : Any=3_2 , __a : int=5 , __a : int=4 , __a : List[Any]=3_7 , __a : Optional[int]="gelu" , __a : Optional[Any]=0.1 , __a : Union[str, Any]=0.1 , __a : str=5_1_2 , __a : List[Any]=1_6 , __a : Optional[int]=2 , __a : int=0.0_2 , __a : Dict=3 , __a : Optional[Any]=4 , __a : Any=None , ) -> str:
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = seq_length
__UpperCAmelCase = is_training
__UpperCAmelCase = use_input_mask
__UpperCAmelCase = use_token_type_ids
__UpperCAmelCase = use_labels
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = embedding_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = type_sequence_label_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = num_labels
__UpperCAmelCase = num_choices
__UpperCAmelCase = scope
def snake_case__ ( self : Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = None
if self.use_input_mask:
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = None
if self.use_token_type_ids:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self : List[str] ) -> Dict:
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
def snake_case__ ( self : List[Any] , __a : Any , __a : Optional[Any] , __a : List[Any] , __a : Dict , __a : int , __a : Optional[int] , __a : Tuple ) -> Union[str, Any]:
__UpperCAmelCase = MegatronBertModel(config=__a )
model.to(__a )
model.eval()
__UpperCAmelCase = model(__a , attention_mask=__a , token_type_ids=__a )
__UpperCAmelCase = model(__a , token_type_ids=__a )
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def snake_case__ ( self : int , __a : Optional[Any] , __a : int , __a : List[str] , __a : Union[str, Any] , __a : Tuple , __a : Any , __a : List[Any] ) -> Optional[int]:
__UpperCAmelCase = MegatronBertForMaskedLM(config=__a )
model.to(__a )
model.eval()
__UpperCAmelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : Optional[Any] , __a : Optional[int] , __a : Optional[int] , __a : Any , __a : Any , __a : Tuple , __a : int , __a : Optional[int] ) -> List[Any]:
__UpperCAmelCase = MegatronBertForCausalLM(config=__a )
model.to(__a )
model.eval()
__UpperCAmelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : Any , __a : Tuple , __a : Tuple , __a : List[str] , __a : Optional[int] , __a : Union[str, Any] , __a : Union[str, Any] , __a : int ) -> Tuple:
__UpperCAmelCase = MegatronBertForNextSentencePrediction(config=__a )
model.to(__a )
model.eval()
__UpperCAmelCase = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def snake_case__ ( self : List[Any] , __a : Tuple , __a : List[str] , __a : Tuple , __a : Tuple , __a : Any , __a : Optional[Any] , __a : int ) -> List[str]:
__UpperCAmelCase = MegatronBertForPreTraining(config=__a )
model.to(__a )
model.eval()
__UpperCAmelCase = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , next_sentence_label=__a , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def snake_case__ ( self : Dict , __a : List[str] , __a : Optional[Any] , __a : Tuple , __a : str , __a : Dict , __a : Optional[int] , __a : Any ) -> List[str]:
__UpperCAmelCase = MegatronBertForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
__UpperCAmelCase = model(
__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self : str , __a : Dict , __a : List[Any] , __a : str , __a : Union[str, Any] , __a : List[str] , __a : Any , __a : List[str] ) -> int:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = MegatronBertForSequenceClassification(__a )
model.to(__a )
model.eval()
__UpperCAmelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : Union[str, Any] , __a : List[Any] , __a : List[Any] , __a : Any , __a : List[str] , __a : Optional[int] , __a : Optional[int] , __a : Any ) -> str:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = MegatronBertForTokenClassification(config=__a )
model.to(__a )
model.eval()
__UpperCAmelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : Optional[int] , __a : Any , __a : List[Any] , __a : Any , __a : Dict , __a : str , __a : Union[str, Any] , __a : Union[str, Any] ) -> List[str]:
__UpperCAmelCase = self.num_choices
__UpperCAmelCase = MegatronBertForMultipleChoice(config=__a )
model.to(__a )
model.eval()
__UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__ ( self : str ) -> str:
__UpperCAmelCase = self.prepare_config_and_inputs()
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) = config_and_inputs
__UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class A ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
a_ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
a_ = (
{
'''feature-extraction''': MegatronBertModel,
'''fill-mask''': MegatronBertForMaskedLM,
'''question-answering''': MegatronBertForQuestionAnswering,
'''text-classification''': MegatronBertForSequenceClassification,
'''text-generation''': MegatronBertForCausalLM,
'''token-classification''': MegatronBertForTokenClassification,
'''zero-shot''': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ = True
# test_resize_embeddings = False
a_ = False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                # Pretraining heads need token-level MLM labels plus a per-example
                # next-sentence label; both are zero-filled dummies here.
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict['''next_sentence_label'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        self.model_tester = MegatronBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MegatronBertConfig , hidden_size=3_7 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs )

    def test_for_next_sequence_prediction( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs )

    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs )
def _long_tensor( tok_lst ):
    """Build a dtype=torch.long tensor of token ids on the test device."""
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )


TOLERANCE = 1e-4  # absolute/relative tolerance for the fp16 integration check below
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests( unittest.TestCase ):
@slow
@unittest.skip('''Model is not available.''' )
    def test_inference_no_head( self ):
        directory = '''nvidia/megatron-bert-uncased-345m'''
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ['''MYDIR'''] , directory )
        model = MegatronBertModel.from_pretrained(directory )
        model.to(torch_device )
        model.half()
        input_ids = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 1_0_2_4) )
        self.assertEqual(output.shape , expected_shape )
        expected = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
        for ii in range(3 ):
            for jj in range(3 ):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = '''ii={} jj={} a={} b={}'''.format(ii , jj , a , b )
                self.assertTrue(math.isclose(a , b , rel_tol=TOLERANCE , abs_tol=TOLERANCE ) , msg=msg )
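    # --- Hedged addition (illustrative, not part of the original test) ---
    # Vectorized form of the elementwise closeness check above, using
    # torch.allclose with the same tolerances instead of a Python loop.
    def _first_three_by_three_close( self , output , expected_flat ):
        expected_t = torch.tensor(expected_flat , dtype=output.dtype , device=output.device ).reshape(3 , 3 )
        return torch.allclose(output[0, :3, :3] , expected_t , rtol=TOLERANCE , atol=TOLERANCE )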
| 654 |
'''BertGeneration model configuration.'''
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig( PretrainedConfig ):
    model_type = '''bert-generation'''

    def __init__( self , vocab_size=5_0_3_5_8 , hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , intermediate_size=4_0_9_6 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=0 , bos_token_id=2 , eos_token_id=1 , position_embedding_type="absolute" , use_cache=True , **kwargs ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
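# --- Hedged usage sketch (illustrative; shown as comments because this module
# uses relative imports and is not meant to run as a script) ---
# Instantiate the config with a couple of overrides and round-trip it through
# the standard PretrainedConfig to_dict()/from_dict() helpers:
#
#   cfg = BertGenerationConfig(hidden_size=5_1_2, num_hidden_layers=6)
#   assert cfg.hidden_size == 5_1_2
#   assert BertGenerationConfig.from_dict(cfg.to_dict()).hidden_size == 5_1_2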
| 654 | 1 |
'''Tokenization tests for the RoFormer tokenizers.'''
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp( self ):
        super().setUp()

    def get_tokenizer( self , **kwargs ):
        return self.tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **kwargs )

    def get_rust_tokenizer( self , **kwargs ):
        return self.rust_tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **kwargs )

    def get_chinese_input_output_texts( self ):
        input_text = '''永和服装饰品有限公司,今天天气非常好'''
        output_text = '''永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'''
        return input_text, output_text

    def test_tokenizer( self ):
        tokenizer = self.get_tokenizer()
        input_text , output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , output_text.split() )
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , exp_tokens )

    def test_rust_tokenizer( self ):
        tokenizer = self.get_rust_tokenizer()
        input_text , output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , output_text.split() )
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , exp_tokens )

    # a new tokenizer cannot be trained via the Tokenizers library here, so the
    # inherited training tests are skipped as no-ops
    def test_training_new_tokenizer( self ):
        pass

    def test_training_new_tokenizer_with_special_tokens_change( self ):
        pass

    def test_save_slow_from_fast_and_reload_fast( self ):
        pass
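# --- Hedged usage sketch (illustrative; assumes network access to the Hub
# checkpoint used by the tests above) ---
# The tokenize -> convert_tokens_to_ids round trip exercised by the tests:
#
#   from transformers import RoFormerTokenizer
#   tok = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
#   tokens = tok.tokenize("今天天气非常好")
#   ids = tok.convert_tokens_to_ids(tokens)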
| 654 |
'''Lorentz transformation of a four-vector in special relativity.'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458
# Symbols
ct , x , y , z = symbols("ct x y z")
def beta ( velocity : float ):
    """Return velocity / c, validating that 1 <= velocity <= c."""
    if velocity > c:
        raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError('''Speed must be greater than or equal to 1!''' )
    return velocity / c
def gamma ( velocity : float ):
    """Return the Lorentz factor 1 / sqrt(1 - (v/c)**2)."""
    return 1 / sqrt(1 - beta(velocity ) ** 2 )
def transformation_matrix ( velocity : float ):
    """Return the 4x4 Lorentz boost matrix for a boost along the x-axis."""
    return np.array(
        [
            [gamma(velocity ), -gamma(velocity ) * beta(velocity ), 0, 0],
            [-gamma(velocity ) * beta(velocity ), gamma(velocity ), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ] )
def transform ( velocity : float , event : np.ndarray | None = None ):
    """Apply the boost for ``velocity`` to ``event``; defaults to the symbolic four-vector [ct, x, y, z]."""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z] )  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29_979_245)
print("Example of four vector: ")
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
| 654 | 1 |